1 | /* |
2 | * videobuf2-core.c - video buffer 2 core framework |
3 | * |
4 | * Copyright (C) 2010 Samsung Electronics |
5 | * |
6 | * Author: Pawel Osciak <pawel@osciak.com> |
7 | * Marek Szyprowski <m.szyprowski@samsung.com> |
8 | * |
9 | * The vb2_thread implementation was based on code from videobuf-dvb.c: |
10 | * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs] |
11 | * |
12 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License as published by |
14 | * the Free Software Foundation. |
15 | */ |
16 | |
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
18 | |
19 | #include <linux/err.h> |
20 | #include <linux/kernel.h> |
21 | #include <linux/module.h> |
22 | #include <linux/mm.h> |
23 | #include <linux/poll.h> |
24 | #include <linux/slab.h> |
25 | #include <linux/sched.h> |
26 | #include <linux/freezer.h> |
27 | #include <linux/kthread.h> |
28 | |
29 | #include <media/videobuf2-core.h> |
30 | #include <media/v4l2-mc.h> |
31 | |
32 | #include <trace/events/vb2.h> |
33 | |
34 | #define PLANE_INDEX_BITS 3 |
35 | #define PLANE_INDEX_SHIFT (PAGE_SHIFT + PLANE_INDEX_BITS) |
36 | #define PLANE_INDEX_MASK (BIT_MASK(PLANE_INDEX_BITS) - 1) |
37 | #define MAX_BUFFER_INDEX BIT_MASK(30 - PLANE_INDEX_SHIFT) |
38 | #define BUFFER_INDEX_MASK (MAX_BUFFER_INDEX - 1) |
39 | |
40 | #if BIT(PLANE_INDEX_BITS) != VIDEO_MAX_PLANES |
#error BIT(PLANE_INDEX_BITS) must be equal to VIDEO_MAX_PLANES
42 | #endif |
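
/*
 * Worked example (assuming PAGE_SHIFT == 12): PLANE_INDEX_SHIFT is 15,
 * PLANE_INDEX_MASK is 0x7, MAX_BUFFER_INDEX is BIT(15) and BUFFER_INDEX_MASK
 * is 0x7fff, i.e. 15 bits are left for the buffer index in an offset cookie
 * (bit 30 is reserved for mem2mem, see __setup_offsets()).
 */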
43 | |
44 | static int debug; |
45 | module_param(debug, int, 0644); |
46 | |
47 | #define dprintk(q, level, fmt, arg...) \ |
48 | do { \ |
49 | if (debug >= level) \ |
50 | pr_info("[%s] %s: " fmt, (q)->name, __func__, \ |
51 | ## arg); \ |
52 | } while (0) |
53 | |
54 | #ifdef CONFIG_VIDEO_ADV_DEBUG |
55 | |
56 | /* |
57 | * If advanced debugging is on, then count how often each op is called |
58 | * successfully, which can either be per-buffer or per-queue. |
59 | * |
60 | * This makes it easy to check that the 'init' and 'cleanup' |
61 | * (and variations thereof) stay balanced. |
62 | */ |
63 | |
64 | #define log_memop(vb, op) \ |
65 | dprintk((vb)->vb2_queue, 2, "call_memop(%d, %s)%s\n", \ |
66 | (vb)->index, #op, \ |
67 | (vb)->vb2_queue->mem_ops->op ? "" : " (nop)") |
68 | |
69 | #define call_memop(vb, op, args...) \ |
70 | ({ \ |
71 | struct vb2_queue *_q = (vb)->vb2_queue; \ |
72 | int err; \ |
73 | \ |
74 | log_memop(vb, op); \ |
75 | err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \ |
76 | if (!err) \ |
77 | (vb)->cnt_mem_ ## op++; \ |
78 | err; \ |
79 | }) |
80 | |
81 | #define call_ptr_memop(op, vb, args...) \ |
82 | ({ \ |
83 | struct vb2_queue *_q = (vb)->vb2_queue; \ |
84 | void *ptr; \ |
85 | \ |
86 | log_memop(vb, op); \ |
87 | ptr = _q->mem_ops->op ? _q->mem_ops->op(vb, args) : NULL; \ |
88 | if (!IS_ERR_OR_NULL(ptr)) \ |
89 | (vb)->cnt_mem_ ## op++; \ |
90 | ptr; \ |
91 | }) |
92 | |
93 | #define call_void_memop(vb, op, args...) \ |
94 | ({ \ |
95 | struct vb2_queue *_q = (vb)->vb2_queue; \ |
96 | \ |
97 | log_memop(vb, op); \ |
98 | if (_q->mem_ops->op) \ |
99 | _q->mem_ops->op(args); \ |
100 | (vb)->cnt_mem_ ## op++; \ |
101 | }) |
102 | |
103 | #define log_qop(q, op) \ |
104 | dprintk(q, 2, "call_qop(%s)%s\n", #op, \ |
105 | (q)->ops->op ? "" : " (nop)") |
106 | |
107 | #define call_qop(q, op, args...) \ |
108 | ({ \ |
109 | int err; \ |
110 | \ |
111 | log_qop(q, op); \ |
112 | err = (q)->ops->op ? (q)->ops->op(args) : 0; \ |
113 | if (!err) \ |
114 | (q)->cnt_ ## op++; \ |
115 | err; \ |
116 | }) |
117 | |
118 | #define call_void_qop(q, op, args...) \ |
119 | ({ \ |
120 | log_qop(q, op); \ |
121 | if ((q)->ops->op) \ |
122 | (q)->ops->op(args); \ |
123 | (q)->cnt_ ## op++; \ |
124 | }) |
125 | |
126 | #define log_vb_qop(vb, op, args...) \ |
127 | dprintk((vb)->vb2_queue, 2, "call_vb_qop(%d, %s)%s\n", \ |
128 | (vb)->index, #op, \ |
129 | (vb)->vb2_queue->ops->op ? "" : " (nop)") |
130 | |
131 | #define call_vb_qop(vb, op, args...) \ |
132 | ({ \ |
133 | int err; \ |
134 | \ |
135 | log_vb_qop(vb, op); \ |
136 | err = (vb)->vb2_queue->ops->op ? \ |
137 | (vb)->vb2_queue->ops->op(args) : 0; \ |
138 | if (!err) \ |
139 | (vb)->cnt_ ## op++; \ |
140 | err; \ |
141 | }) |
142 | |
143 | #define call_void_vb_qop(vb, op, args...) \ |
144 | ({ \ |
145 | log_vb_qop(vb, op); \ |
146 | if ((vb)->vb2_queue->ops->op) \ |
147 | (vb)->vb2_queue->ops->op(args); \ |
148 | (vb)->cnt_ ## op++; \ |
149 | }) |
150 | |
151 | #else |
152 | |
153 | #define call_memop(vb, op, args...) \ |
154 | ((vb)->vb2_queue->mem_ops->op ? \ |
155 | (vb)->vb2_queue->mem_ops->op(args) : 0) |
156 | |
157 | #define call_ptr_memop(op, vb, args...) \ |
158 | ((vb)->vb2_queue->mem_ops->op ? \ |
159 | (vb)->vb2_queue->mem_ops->op(vb, args) : NULL) |
160 | |
161 | #define call_void_memop(vb, op, args...) \ |
162 | do { \ |
163 | if ((vb)->vb2_queue->mem_ops->op) \ |
164 | (vb)->vb2_queue->mem_ops->op(args); \ |
165 | } while (0) |
166 | |
167 | #define call_qop(q, op, args...) \ |
168 | ((q)->ops->op ? (q)->ops->op(args) : 0) |
169 | |
170 | #define call_void_qop(q, op, args...) \ |
171 | do { \ |
172 | if ((q)->ops->op) \ |
173 | (q)->ops->op(args); \ |
174 | } while (0) |
175 | |
176 | #define call_vb_qop(vb, op, args...) \ |
177 | ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0) |
178 | |
179 | #define call_void_vb_qop(vb, op, args...) \ |
180 | do { \ |
181 | if ((vb)->vb2_queue->ops->op) \ |
182 | (vb)->vb2_queue->ops->op(args); \ |
183 | } while (0) |
184 | |
185 | #endif |
186 | |
187 | #define call_bufop(q, op, args...) \ |
188 | ({ \ |
189 | int ret = 0; \ |
190 | if (q && q->buf_ops && q->buf_ops->op) \ |
191 | ret = q->buf_ops->op(args); \ |
192 | ret; \ |
193 | }) |
194 | |
195 | #define call_void_bufop(q, op, args...) \ |
196 | ({ \ |
197 | if (q && q->buf_ops && q->buf_ops->op) \ |
198 | q->buf_ops->op(args); \ |
199 | }) |
200 | |
201 | static void __vb2_queue_cancel(struct vb2_queue *q); |
202 | static void __enqueue_in_driver(struct vb2_buffer *vb); |
203 | |
204 | static const char *vb2_state_name(enum vb2_buffer_state s) |
205 | { |
206 | static const char * const state_names[] = { |
		[VB2_BUF_STATE_DEQUEUED] = "dequeued",
		[VB2_BUF_STATE_IN_REQUEST] = "in request",
		[VB2_BUF_STATE_PREPARING] = "preparing",
		[VB2_BUF_STATE_QUEUED] = "queued",
		[VB2_BUF_STATE_ACTIVE] = "active",
		[VB2_BUF_STATE_DONE] = "done",
		[VB2_BUF_STATE_ERROR] = "error",
214 | }; |
215 | |
216 | if ((unsigned int)(s) < ARRAY_SIZE(state_names)) |
217 | return state_names[s]; |
218 | return "unknown" ; |
219 | } |
220 | |
221 | /* |
222 | * __vb2_buf_mem_alloc() - allocate video memory for the given buffer |
223 | */ |
224 | static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) |
225 | { |
226 | struct vb2_queue *q = vb->vb2_queue; |
227 | void *mem_priv; |
228 | int plane; |
229 | int ret = -ENOMEM; |
230 | |
231 | /* |
232 | * Allocate memory for all planes in this buffer |
233 | * NOTE: mmapped areas should be page aligned |
234 | */ |
235 | for (plane = 0; plane < vb->num_planes; ++plane) { |
236 | /* Memops alloc requires size to be page aligned. */ |
237 | unsigned long size = PAGE_ALIGN(vb->planes[plane].length); |
238 | |
239 | /* Did it wrap around? */ |
240 | if (size < vb->planes[plane].length) |
241 | goto free; |
242 | |
243 | mem_priv = call_ptr_memop(alloc, |
244 | vb, |
245 | q->alloc_devs[plane] ? : q->dev, |
246 | size); |
		if (IS_ERR_OR_NULL(mem_priv)) {
			if (mem_priv)
				ret = PTR_ERR(mem_priv);
250 | goto free; |
251 | } |
252 | |
253 | /* Associate allocator private data with this plane */ |
254 | vb->planes[plane].mem_priv = mem_priv; |
255 | } |
256 | |
257 | return 0; |
258 | free: |
259 | /* Free already allocated memory if one of the allocations failed */ |
260 | for (; plane > 0; --plane) { |
261 | call_void_memop(vb, put, vb->planes[plane - 1].mem_priv); |
262 | vb->planes[plane - 1].mem_priv = NULL; |
263 | } |
264 | |
265 | return ret; |
266 | } |
267 | |
268 | /* |
269 | * __vb2_buf_mem_free() - free memory of the given buffer |
270 | */ |
271 | static void __vb2_buf_mem_free(struct vb2_buffer *vb) |
272 | { |
273 | unsigned int plane; |
274 | |
275 | for (plane = 0; plane < vb->num_planes; ++plane) { |
276 | call_void_memop(vb, put, vb->planes[plane].mem_priv); |
277 | vb->planes[plane].mem_priv = NULL; |
278 | dprintk(vb->vb2_queue, 3, "freed plane %d of buffer %d\n" , |
279 | plane, vb->index); |
280 | } |
281 | } |
282 | |
283 | /* |
284 | * __vb2_buf_userptr_put() - release userspace memory associated with |
285 | * a USERPTR buffer |
286 | */ |
287 | static void __vb2_buf_userptr_put(struct vb2_buffer *vb) |
288 | { |
289 | unsigned int plane; |
290 | |
291 | for (plane = 0; plane < vb->num_planes; ++plane) { |
292 | if (vb->planes[plane].mem_priv) |
293 | call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv); |
294 | vb->planes[plane].mem_priv = NULL; |
295 | } |
296 | } |
297 | |
298 | /* |
299 | * __vb2_plane_dmabuf_put() - release memory associated with |
300 | * a DMABUF shared plane |
301 | */ |
302 | static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p) |
303 | { |
304 | if (!p->mem_priv) |
305 | return; |
306 | |
307 | if (p->dbuf_mapped) |
308 | call_void_memop(vb, unmap_dmabuf, p->mem_priv); |
309 | |
310 | call_void_memop(vb, detach_dmabuf, p->mem_priv); |
	dma_buf_put(p->dbuf);
312 | p->mem_priv = NULL; |
313 | p->dbuf = NULL; |
314 | p->dbuf_mapped = 0; |
315 | } |
316 | |
317 | /* |
318 | * __vb2_buf_dmabuf_put() - release memory associated with |
319 | * a DMABUF shared buffer |
320 | */ |
321 | static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) |
322 | { |
323 | unsigned int plane; |
324 | |
325 | for (plane = 0; plane < vb->num_planes; ++plane) |
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
327 | } |
328 | |
329 | /* |
330 | * __vb2_buf_mem_prepare() - call ->prepare() on buffer's private memory |
331 | * to sync caches |
332 | */ |
333 | static void __vb2_buf_mem_prepare(struct vb2_buffer *vb) |
334 | { |
335 | unsigned int plane; |
336 | |
337 | if (vb->synced) |
338 | return; |
339 | |
340 | vb->synced = 1; |
341 | for (plane = 0; plane < vb->num_planes; ++plane) |
342 | call_void_memop(vb, prepare, vb->planes[plane].mem_priv); |
343 | } |
344 | |
345 | /* |
 * __vb2_buf_mem_finish() - call ->finish() on buffer's private memory
347 | * to sync caches |
348 | */ |
349 | static void __vb2_buf_mem_finish(struct vb2_buffer *vb) |
350 | { |
351 | unsigned int plane; |
352 | |
353 | if (!vb->synced) |
354 | return; |
355 | |
356 | vb->synced = 0; |
357 | for (plane = 0; plane < vb->num_planes; ++plane) |
358 | call_void_memop(vb, finish, vb->planes[plane].mem_priv); |
359 | } |
360 | |
361 | /* |
362 | * __setup_offsets() - setup unique offsets ("cookies") for every plane in |
363 | * the buffer. |
364 | */ |
365 | static void __setup_offsets(struct vb2_buffer *vb) |
366 | { |
367 | struct vb2_queue *q = vb->vb2_queue; |
368 | unsigned int plane; |
369 | unsigned long offset = 0; |
370 | |
371 | /* |
372 | * The offset "cookie" value has the following constraints: |
373 | * - a buffer can have up to 8 planes. |
374 | * - v4l2 mem2mem uses bit 30 to distinguish between |
375 | * OUTPUT (aka "source", bit 30 is 0) and |
376 | * CAPTURE (aka "destination", bit 30 is 1) buffers. |
377 | * - must be page aligned |
378 | * That led to this bit mapping when PAGE_SHIFT = 12: |
379 | * |30 |29 15|14 12|11 0| |
380 | * |DST_QUEUE_OFF_BASE|buffer index|plane index| 0 | |
381 | * where there are 15 bits to store the buffer index. |
382 | * Depending on PAGE_SHIFT value we can have fewer bits |
383 | * to store the buffer index. |
384 | */ |
385 | offset = vb->index << PLANE_INDEX_SHIFT; |
386 | |
387 | for (plane = 0; plane < vb->num_planes; ++plane) { |
388 | vb->planes[plane].m.offset = offset + (plane << PAGE_SHIFT); |
389 | |
390 | dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n" , |
391 | vb->index, plane, offset); |
392 | } |
393 | } |
394 | |
395 | static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb) |
396 | { |
397 | /* |
398 | * DMA exporter should take care of cache syncs, so we can avoid |
399 | * explicit ->prepare()/->finish() syncs. For other ->memory types |
	 * we always need ->prepare() and/or ->finish() cache sync.
401 | */ |
402 | if (q->memory == VB2_MEMORY_DMABUF) { |
403 | vb->skip_cache_sync_on_finish = 1; |
404 | vb->skip_cache_sync_on_prepare = 1; |
405 | return; |
406 | } |
407 | |
408 | /* |
409 | * ->finish() cache sync can be avoided when queue direction is |
410 | * TO_DEVICE. |
411 | */ |
412 | if (q->dma_dir == DMA_TO_DEVICE) |
413 | vb->skip_cache_sync_on_finish = 1; |
414 | } |
415 | |
416 | /** |
417 | * vb2_queue_add_buffer() - add a buffer to a queue |
418 | * @q: pointer to &struct vb2_queue with videobuf2 queue. |
419 | * @vb: pointer to &struct vb2_buffer to be added to the queue. |
 * @index: index at which to add @vb to the queue
421 | */ |
422 | static void vb2_queue_add_buffer(struct vb2_queue *q, struct vb2_buffer *vb, unsigned int index) |
423 | { |
424 | WARN_ON(index >= q->max_num_buffers || q->bufs[index] || vb->vb2_queue); |
425 | |
426 | q->bufs[index] = vb; |
427 | vb->index = index; |
428 | vb->vb2_queue = q; |
429 | } |
430 | |
431 | /** |
432 | * vb2_queue_remove_buffer() - remove a buffer from a queue |
433 | * @vb: pointer to &struct vb2_buffer to be removed from the queue. |
434 | */ |
435 | static void vb2_queue_remove_buffer(struct vb2_buffer *vb) |
436 | { |
437 | vb->vb2_queue->bufs[vb->index] = NULL; |
438 | vb->vb2_queue = NULL; |
439 | } |
440 | |
441 | /* |
 * __vb2_queue_alloc() - allocate vb2 buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue and initialize the
 * queue
445 | * |
446 | * Returns the number of buffers successfully allocated. |
447 | */ |
448 | static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, |
449 | unsigned int num_buffers, unsigned int num_planes, |
450 | const unsigned plane_sizes[VB2_MAX_PLANES]) |
451 | { |
452 | unsigned int q_num_buffers = vb2_get_num_buffers(q); |
453 | unsigned int buffer, plane; |
454 | struct vb2_buffer *vb; |
455 | int ret; |
456 | |
457 | /* |
	 * Ensure that the number of newly requested buffers plus the number
	 * of buffers already in the queue does not exceed q->max_num_buffers.
460 | */ |
461 | num_buffers = min_t(unsigned int, num_buffers, |
462 | q->max_num_buffers - q_num_buffers); |
463 | |
464 | for (buffer = 0; buffer < num_buffers; ++buffer) { |
465 | /* Allocate vb2 buffer structures */ |
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(q, 1, "memory alloc for buffer struct failed\n");
469 | break; |
470 | } |
471 | |
472 | vb->state = VB2_BUF_STATE_DEQUEUED; |
473 | vb->num_planes = num_planes; |
474 | vb->type = q->type; |
475 | vb->memory = memory; |
476 | init_buffer_cache_hints(q, vb); |
477 | for (plane = 0; plane < num_planes; ++plane) { |
478 | vb->planes[plane].length = plane_sizes[plane]; |
479 | vb->planes[plane].min_length = plane_sizes[plane]; |
480 | } |
481 | |
		vb2_queue_add_buffer(q, vb, q_num_buffers + buffer);
483 | call_void_bufop(q, init_buffer, vb); |
484 | |
485 | /* Allocate video buffer memory for the MMAP type */ |
486 | if (memory == VB2_MEMORY_MMAP) { |
487 | ret = __vb2_buf_mem_alloc(vb); |
488 | if (ret) { |
489 | dprintk(q, 1, "failed allocating memory for buffer %d\n" , |
490 | buffer); |
491 | vb2_queue_remove_buffer(vb); |
492 | kfree(objp: vb); |
493 | break; |
494 | } |
495 | __setup_offsets(vb); |
496 | /* |
497 | * Call the driver-provided buffer initialization |
498 | * callback, if given. An error in initialization |
499 | * results in queue setup failure. |
500 | */ |
501 | ret = call_vb_qop(vb, buf_init, vb); |
502 | if (ret) { |
503 | dprintk(q, 1, "buffer %d %p initialization failed\n" , |
504 | buffer, vb); |
505 | __vb2_buf_mem_free(vb); |
506 | vb2_queue_remove_buffer(vb); |
507 | kfree(objp: vb); |
508 | break; |
509 | } |
510 | } |
511 | } |
512 | |
513 | dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n" , |
514 | buffer, num_planes); |
515 | |
516 | return buffer; |
517 | } |
518 | |
519 | /* |
520 | * __vb2_free_mem() - release all video buffer memory for a given queue |
521 | */ |
522 | static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers) |
523 | { |
524 | unsigned int buffer; |
525 | struct vb2_buffer *vb; |
526 | unsigned int q_num_buffers = vb2_get_num_buffers(q); |
527 | |
528 | for (buffer = q_num_buffers - buffers; buffer < q_num_buffers; |
529 | ++buffer) { |
		vb = vb2_get_buffer(q, buffer);
531 | if (!vb) |
532 | continue; |
533 | |
534 | /* Free MMAP buffers or release USERPTR buffers */ |
535 | if (q->memory == VB2_MEMORY_MMAP) |
536 | __vb2_buf_mem_free(vb); |
537 | else if (q->memory == VB2_MEMORY_DMABUF) |
538 | __vb2_buf_dmabuf_put(vb); |
539 | else |
540 | __vb2_buf_userptr_put(vb); |
541 | } |
542 | } |
543 | |
544 | /* |
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information; if no buffers are left, return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
548 | */ |
549 | static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) |
550 | { |
551 | unsigned int buffer; |
552 | unsigned int q_num_buffers = vb2_get_num_buffers(q); |
553 | |
554 | lockdep_assert_held(&q->mmap_lock); |
555 | |
556 | /* Call driver-provided cleanup function for each buffer, if provided */ |
557 | for (buffer = q_num_buffers - buffers; buffer < q_num_buffers; |
558 | ++buffer) { |
		struct vb2_buffer *vb = vb2_get_buffer(q, buffer);
560 | |
561 | if (vb && vb->planes[0].mem_priv) |
562 | call_void_vb_qop(vb, buf_cleanup, vb); |
563 | } |
564 | |
565 | /* Release video buffer memory */ |
566 | __vb2_free_mem(q, buffers); |
567 | |
568 | #ifdef CONFIG_VIDEO_ADV_DEBUG |
569 | /* |
570 | * Check that all the calls were balanced during the life-time of this |
571 | * queue. If not then dump the counters to the kernel log. |
572 | */ |
573 | if (q_num_buffers) { |
574 | bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming || |
575 | q->cnt_prepare_streaming != q->cnt_unprepare_streaming || |
576 | q->cnt_wait_prepare != q->cnt_wait_finish; |
577 | |
578 | if (unbalanced) { |
579 | pr_info("unbalanced counters for queue %p:\n" , q); |
580 | if (q->cnt_start_streaming != q->cnt_stop_streaming) |
581 | pr_info(" setup: %u start_streaming: %u stop_streaming: %u\n" , |
582 | q->cnt_queue_setup, q->cnt_start_streaming, |
583 | q->cnt_stop_streaming); |
584 | if (q->cnt_prepare_streaming != q->cnt_unprepare_streaming) |
585 | pr_info(" prepare_streaming: %u unprepare_streaming: %u\n" , |
586 | q->cnt_prepare_streaming, q->cnt_unprepare_streaming); |
587 | if (q->cnt_wait_prepare != q->cnt_wait_finish) |
588 | pr_info(" wait_prepare: %u wait_finish: %u\n" , |
589 | q->cnt_wait_prepare, q->cnt_wait_finish); |
590 | } |
591 | q->cnt_queue_setup = 0; |
592 | q->cnt_wait_prepare = 0; |
593 | q->cnt_wait_finish = 0; |
594 | q->cnt_prepare_streaming = 0; |
595 | q->cnt_start_streaming = 0; |
596 | q->cnt_stop_streaming = 0; |
597 | q->cnt_unprepare_streaming = 0; |
598 | } |
599 | for (buffer = 0; buffer < vb2_get_num_buffers(q); buffer++) { |
		struct vb2_buffer *vb = vb2_get_buffer(q, buffer);
601 | bool unbalanced; |
602 | |
603 | if (!vb) |
604 | continue; |
605 | |
606 | unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put || |
607 | vb->cnt_mem_prepare != vb->cnt_mem_finish || |
608 | vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr || |
609 | vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf || |
610 | vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf || |
611 | vb->cnt_buf_queue != vb->cnt_buf_done || |
612 | vb->cnt_buf_prepare != vb->cnt_buf_finish || |
613 | vb->cnt_buf_init != vb->cnt_buf_cleanup; |
614 | |
615 | if (unbalanced) { |
616 | pr_info("unbalanced counters for queue %p, buffer %d:\n" , |
617 | q, buffer); |
618 | if (vb->cnt_buf_init != vb->cnt_buf_cleanup) |
619 | pr_info(" buf_init: %u buf_cleanup: %u\n" , |
620 | vb->cnt_buf_init, vb->cnt_buf_cleanup); |
621 | if (vb->cnt_buf_prepare != vb->cnt_buf_finish) |
622 | pr_info(" buf_prepare: %u buf_finish: %u\n" , |
623 | vb->cnt_buf_prepare, vb->cnt_buf_finish); |
624 | if (vb->cnt_buf_queue != vb->cnt_buf_done) |
625 | pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n" , |
626 | vb->cnt_buf_out_validate, vb->cnt_buf_queue, |
627 | vb->cnt_buf_done, vb->cnt_buf_request_complete); |
628 | if (vb->cnt_mem_alloc != vb->cnt_mem_put) |
629 | pr_info(" alloc: %u put: %u\n" , |
630 | vb->cnt_mem_alloc, vb->cnt_mem_put); |
631 | if (vb->cnt_mem_prepare != vb->cnt_mem_finish) |
632 | pr_info(" prepare: %u finish: %u\n" , |
633 | vb->cnt_mem_prepare, vb->cnt_mem_finish); |
634 | if (vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr) |
635 | pr_info(" get_userptr: %u put_userptr: %u\n" , |
636 | vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr); |
637 | if (vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf) |
638 | pr_info(" attach_dmabuf: %u detach_dmabuf: %u\n" , |
639 | vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf); |
640 | if (vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf) |
641 | pr_info(" map_dmabuf: %u unmap_dmabuf: %u\n" , |
642 | vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf); |
643 | pr_info(" get_dmabuf: %u num_users: %u\n" , |
644 | vb->cnt_mem_get_dmabuf, |
645 | vb->cnt_mem_num_users); |
646 | } |
647 | } |
648 | #endif |
649 | |
650 | /* Free vb2 buffers */ |
651 | for (buffer = q_num_buffers - buffers; buffer < q_num_buffers; |
652 | ++buffer) { |
		struct vb2_buffer *vb = vb2_get_buffer(q, buffer);

		if (!vb)
			continue;

		vb2_queue_remove_buffer(vb);
		kfree(vb);
660 | } |
661 | |
662 | q->num_buffers -= buffers; |
663 | if (!vb2_get_num_buffers(q)) { |
664 | q->memory = VB2_MEMORY_UNKNOWN; |
		INIT_LIST_HEAD(&q->queued_list);
666 | } |
667 | } |
668 | |
669 | bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb) |
670 | { |
671 | unsigned int plane; |
672 | for (plane = 0; plane < vb->num_planes; ++plane) { |
673 | void *mem_priv = vb->planes[plane].mem_priv; |
674 | /* |
675 | * If num_users() has not been provided, call_memop |
676 | * will return 0, apparently nobody cares about this |
677 | * case anyway. If num_users() returns more than 1, |
678 | * we are not the only user of the plane's memory. |
679 | */ |
680 | if (mem_priv && call_memop(vb, num_users, mem_priv) > 1) |
681 | return true; |
682 | } |
683 | return false; |
684 | } |
685 | EXPORT_SYMBOL(vb2_buffer_in_use); |
686 | |
687 | /* |
 * __buffers_in_use() - return true if any buffers on the queue are in use and
 * the queue cannot be freed (by means of a REQBUFS(0) call)
690 | */ |
691 | static bool __buffers_in_use(struct vb2_queue *q) |
692 | { |
693 | unsigned int buffer; |
694 | for (buffer = 0; buffer < vb2_get_num_buffers(q); ++buffer) { |
		struct vb2_buffer *vb = vb2_get_buffer(q, buffer);
696 | |
697 | if (!vb) |
698 | continue; |
699 | |
700 | if (vb2_buffer_in_use(q, vb)) |
701 | return true; |
702 | } |
703 | return false; |
704 | } |
705 | |
706 | void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb) |
707 | { |
708 | call_void_bufop(q, fill_user_buffer, vb, pb); |
709 | } |
710 | EXPORT_SYMBOL_GPL(vb2_core_querybuf); |
711 | |
712 | /* |
713 | * __verify_userptr_ops() - verify that all memory operations required for |
714 | * USERPTR queue type have been provided |
715 | */ |
716 | static int __verify_userptr_ops(struct vb2_queue *q) |
717 | { |
718 | if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr || |
719 | !q->mem_ops->put_userptr) |
720 | return -EINVAL; |
721 | |
722 | return 0; |
723 | } |
724 | |
725 | /* |
726 | * __verify_mmap_ops() - verify that all memory operations required for |
727 | * MMAP queue type have been provided |
728 | */ |
729 | static int __verify_mmap_ops(struct vb2_queue *q) |
730 | { |
731 | if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc || |
732 | !q->mem_ops->put || !q->mem_ops->mmap) |
733 | return -EINVAL; |
734 | |
735 | return 0; |
736 | } |
737 | |
738 | /* |
739 | * __verify_dmabuf_ops() - verify that all memory operations required for |
740 | * DMABUF queue type have been provided |
741 | */ |
742 | static int __verify_dmabuf_ops(struct vb2_queue *q) |
743 | { |
744 | if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf || |
745 | !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf || |
746 | !q->mem_ops->unmap_dmabuf) |
747 | return -EINVAL; |
748 | |
749 | return 0; |
750 | } |
751 | |
752 | int vb2_verify_memory_type(struct vb2_queue *q, |
753 | enum vb2_memory memory, unsigned int type) |
754 | { |
755 | if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR && |
756 | memory != VB2_MEMORY_DMABUF) { |
757 | dprintk(q, 1, "unsupported memory type\n" ); |
758 | return -EINVAL; |
759 | } |
760 | |
761 | if (type != q->type) { |
762 | dprintk(q, 1, "requested type is incorrect\n" ); |
763 | return -EINVAL; |
764 | } |
765 | |
766 | /* |
767 | * Make sure all the required memory ops for given memory type |
768 | * are available. |
769 | */ |
770 | if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) { |
771 | dprintk(q, 1, "MMAP for current setup unsupported\n" ); |
772 | return -EINVAL; |
773 | } |
774 | |
775 | if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) { |
776 | dprintk(q, 1, "USERPTR for current setup unsupported\n" ); |
777 | return -EINVAL; |
778 | } |
779 | |
780 | if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) { |
781 | dprintk(q, 1, "DMABUF for current setup unsupported\n" ); |
782 | return -EINVAL; |
783 | } |
784 | |
785 | /* |
786 | * Place the busy tests at the end: -EBUSY can be ignored when |
787 | * create_bufs is called with count == 0, but count == 0 should still |
788 | * do the memory and type validation. |
789 | */ |
790 | if (vb2_fileio_is_active(q)) { |
791 | dprintk(q, 1, "file io in progress\n" ); |
792 | return -EBUSY; |
793 | } |
794 | return 0; |
795 | } |
796 | EXPORT_SYMBOL(vb2_verify_memory_type); |
797 | |
798 | static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem) |
799 | { |
800 | q->non_coherent_mem = 0; |
801 | |
802 | if (!vb2_queue_allows_cache_hints(q)) |
803 | return; |
804 | q->non_coherent_mem = non_coherent_mem; |
805 | } |
806 | |
807 | static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem) |
808 | { |
809 | if (non_coherent_mem != q->non_coherent_mem) { |
810 | dprintk(q, 1, "memory coherency model mismatch\n" ); |
811 | return false; |
812 | } |
813 | return true; |
814 | } |
815 | |
816 | int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, |
817 | unsigned int flags, unsigned int *count) |
818 | { |
819 | unsigned int num_buffers, allocated_buffers, num_planes = 0; |
820 | unsigned int q_num_bufs = vb2_get_num_buffers(q); |
821 | unsigned plane_sizes[VB2_MAX_PLANES] = { }; |
822 | bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT; |
823 | unsigned int i; |
824 | int ret = 0; |
825 | |
826 | if (q->streaming) { |
827 | dprintk(q, 1, "streaming active\n" ); |
828 | return -EBUSY; |
829 | } |
830 | |
831 | if (q->waiting_in_dqbuf && *count) { |
832 | dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n" ); |
833 | return -EBUSY; |
834 | } |
835 | |
836 | if (*count == 0 || q_num_bufs != 0 || |
837 | (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) || |
838 | !verify_coherency_flags(q, non_coherent_mem)) { |
839 | /* |
840 | * We already have buffers allocated, so first check if they |
841 | * are not in use and can be freed. |
842 | */ |
843 | mutex_lock(&q->mmap_lock); |
844 | if (debug && q->memory == VB2_MEMORY_MMAP && |
845 | __buffers_in_use(q)) |
846 | dprintk(q, 1, "memory in use, orphaning buffers\n" ); |
847 | |
848 | /* |
849 | * Call queue_cancel to clean up any buffers in the |
850 | * QUEUED state which is possible if buffers were prepared or |
851 | * queued without ever calling STREAMON. |
852 | */ |
853 | __vb2_queue_cancel(q); |
		__vb2_queue_free(q, q_num_bufs);
		mutex_unlock(&q->mmap_lock);
856 | |
857 | /* |
858 | * In case of REQBUFS(0) return immediately without calling |
859 | * driver's queue_setup() callback and allocating resources. |
860 | */ |
861 | if (*count == 0) |
862 | return 0; |
863 | } |
864 | |
865 | /* |
866 | * Make sure the requested values and current defaults are sane. |
867 | */ |
868 | num_buffers = max_t(unsigned int, *count, q->min_queued_buffers); |
869 | num_buffers = min_t(unsigned int, num_buffers, q->max_num_buffers); |
870 | memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); |
871 | /* |
872 | * Set this now to ensure that drivers see the correct q->memory value |
873 | * in the queue_setup op. |
874 | */ |
875 | mutex_lock(&q->mmap_lock); |
876 | if (!q->bufs) |
		q->bufs = kcalloc(q->max_num_buffers, sizeof(*q->bufs), GFP_KERNEL);
	if (!q->bufs)
		ret = -ENOMEM;
	q->memory = memory;
	mutex_unlock(&q->mmap_lock);
882 | if (ret) |
883 | return ret; |
884 | set_queue_coherency(q, non_coherent_mem); |
885 | |
886 | /* |
887 | * Ask the driver how many buffers and planes per buffer it requires. |
888 | * Driver also sets the size and allocator context for each plane. |
889 | */ |
890 | ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes, |
891 | plane_sizes, q->alloc_devs); |
892 | if (ret) |
893 | goto error; |
894 | |
895 | /* Check that driver has set sane values */ |
896 | if (WARN_ON(!num_planes)) { |
897 | ret = -EINVAL; |
898 | goto error; |
899 | } |
900 | |
901 | for (i = 0; i < num_planes; i++) |
902 | if (WARN_ON(!plane_sizes[i])) { |
903 | ret = -EINVAL; |
904 | goto error; |
905 | } |
906 | |
907 | /* Finally, allocate buffers and video memory */ |
908 | allocated_buffers = |
909 | __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes); |
910 | if (allocated_buffers == 0) { |
911 | dprintk(q, 1, "memory allocation failed\n" ); |
912 | ret = -ENOMEM; |
913 | goto error; |
914 | } |
915 | |
916 | /* |
917 | * There is no point in continuing if we can't allocate the minimum |
918 | * number of buffers needed by this vb2_queue. |
919 | */ |
920 | if (allocated_buffers < q->min_queued_buffers) |
921 | ret = -ENOMEM; |
922 | |
923 | /* |
924 | * Check if driver can handle the allocated number of buffers. |
925 | */ |
926 | if (!ret && allocated_buffers < num_buffers) { |
927 | num_buffers = allocated_buffers; |
928 | /* |
929 | * num_planes is set by the previous queue_setup(), but since it |
930 | * signals to queue_setup() whether it is called from create_bufs() |
931 | * vs reqbufs() we zero it here to signal that queue_setup() is |
932 | * called for the reqbufs() case. |
933 | */ |
934 | num_planes = 0; |
935 | |
936 | ret = call_qop(q, queue_setup, q, &num_buffers, |
937 | &num_planes, plane_sizes, q->alloc_devs); |
938 | |
939 | if (!ret && allocated_buffers < num_buffers) |
940 | ret = -ENOMEM; |
941 | |
942 | /* |
943 | * Either the driver has accepted a smaller number of buffers, |
944 | * or .queue_setup() returned an error |
945 | */ |
946 | } |
947 | |
948 | mutex_lock(&q->mmap_lock); |
949 | q->num_buffers = allocated_buffers; |
950 | |
951 | if (ret < 0) { |
952 | /* |
953 | * Note: __vb2_queue_free() will subtract 'allocated_buffers' |
954 | * from already queued buffers and it will reset q->memory to |
955 | * VB2_MEMORY_UNKNOWN. |
956 | */ |
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
959 | return ret; |
960 | } |
	mutex_unlock(&q->mmap_lock);
962 | |
963 | /* |
964 | * Return the number of successfully allocated buffers |
965 | * to the userspace. |
966 | */ |
967 | *count = allocated_buffers; |
968 | q->waiting_for_buffers = !q->is_output; |
969 | |
970 | return 0; |
971 | |
972 | error: |
973 | mutex_lock(&q->mmap_lock); |
974 | q->memory = VB2_MEMORY_UNKNOWN; |
	mutex_unlock(&q->mmap_lock);
976 | return ret; |
977 | } |
978 | EXPORT_SYMBOL_GPL(vb2_core_reqbufs); |
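
/*
 * Direct-call sketch (hypothetical; drivers normally reach this through the
 * v4l2 ioctl wrappers rather than calling the core function directly):
 *
 *	unsigned int count = 4;
 *	int err = vb2_core_reqbufs(q, VB2_MEMORY_MMAP, 0, &count);
 *
 * On success, count is updated to the number of buffers actually allocated,
 * which may differ from the number requested.
 */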
979 | |
980 | int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, |
981 | unsigned int flags, unsigned int *count, |
982 | unsigned int requested_planes, |
983 | const unsigned int requested_sizes[]) |
984 | { |
985 | unsigned int num_planes = 0, num_buffers, allocated_buffers; |
986 | unsigned plane_sizes[VB2_MAX_PLANES] = { }; |
987 | bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT; |
988 | unsigned int q_num_bufs = vb2_get_num_buffers(q); |
989 | bool no_previous_buffers = !q_num_bufs; |
990 | int ret = 0; |
991 | |
992 | if (q_num_bufs == q->max_num_buffers) { |
993 | dprintk(q, 1, "maximum number of buffers already allocated\n" ); |
994 | return -ENOBUFS; |
995 | } |
996 | |
997 | if (no_previous_buffers) { |
998 | if (q->waiting_in_dqbuf && *count) { |
999 | dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n" ); |
1000 | return -EBUSY; |
1001 | } |
1002 | memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); |
1003 | /* |
1004 | * Set this now to ensure that drivers see the correct q->memory |
1005 | * value in the queue_setup op. |
1006 | */ |
1007 | mutex_lock(&q->mmap_lock); |
1008 | q->memory = memory; |
1009 | if (!q->bufs) |
			q->bufs = kcalloc(q->max_num_buffers, sizeof(*q->bufs), GFP_KERNEL);
		if (!q->bufs)
			ret = -ENOMEM;
		mutex_unlock(&q->mmap_lock);
1014 | if (ret) |
1015 | return ret; |
1016 | q->waiting_for_buffers = !q->is_output; |
1017 | set_queue_coherency(q, non_coherent_mem); |
1018 | } else { |
1019 | if (q->memory != memory) { |
1020 | dprintk(q, 1, "memory model mismatch\n" ); |
1021 | return -EINVAL; |
1022 | } |
1023 | if (!verify_coherency_flags(q, non_coherent_mem)) |
1024 | return -EINVAL; |
1025 | } |
1026 | |
1027 | num_buffers = min(*count, q->max_num_buffers - q_num_bufs); |
1028 | |
1029 | if (requested_planes && requested_sizes) { |
1030 | num_planes = requested_planes; |
1031 | memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes)); |
1032 | } |
1033 | |
1034 | /* |
	 * Ask the driver whether the requested number of buffers, planes per
1036 | * buffer and their sizes are acceptable |
1037 | */ |
1038 | ret = call_qop(q, queue_setup, q, &num_buffers, |
1039 | &num_planes, plane_sizes, q->alloc_devs); |
1040 | if (ret) |
1041 | goto error; |
1042 | |
1043 | /* Finally, allocate buffers and video memory */ |
1044 | allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers, |
1045 | num_planes, plane_sizes); |
1046 | if (allocated_buffers == 0) { |
1047 | dprintk(q, 1, "memory allocation failed\n" ); |
1048 | ret = -ENOMEM; |
1049 | goto error; |
1050 | } |
1051 | |
1052 | /* |
1053 | * Check if driver can handle the so far allocated number of buffers. |
1054 | */ |
1055 | if (allocated_buffers < num_buffers) { |
1056 | num_buffers = allocated_buffers; |
1057 | |
1058 | /* |
		 * num_buffers contains the total number of buffers that the
		 * queue driver has set up
1061 | */ |
1062 | ret = call_qop(q, queue_setup, q, &num_buffers, |
1063 | &num_planes, plane_sizes, q->alloc_devs); |
1064 | |
1065 | if (!ret && allocated_buffers < num_buffers) |
1066 | ret = -ENOMEM; |
1067 | |
1068 | /* |
1069 | * Either the driver has accepted a smaller number of buffers, |
1070 | * or .queue_setup() returned an error |
1071 | */ |
1072 | } |
1073 | |
1074 | mutex_lock(&q->mmap_lock); |
1075 | q->num_buffers += allocated_buffers; |
1076 | |
1077 | if (ret < 0) { |
1078 | /* |
1079 | * Note: __vb2_queue_free() will subtract 'allocated_buffers' |
1080 | * from already queued buffers and it will reset q->memory to |
1081 | * VB2_MEMORY_UNKNOWN. |
1082 | */ |
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
1085 | return -ENOMEM; |
1086 | } |
	mutex_unlock(&q->mmap_lock);
1088 | |
1089 | /* |
1090 | * Return the number of successfully allocated buffers |
1091 | * to the userspace. |
1092 | */ |
1093 | *count = allocated_buffers; |
1094 | |
1095 | return 0; |
1096 | |
1097 | error: |
1098 | if (no_previous_buffers) { |
1099 | mutex_lock(&q->mmap_lock); |
1100 | q->memory = VB2_MEMORY_UNKNOWN; |
		mutex_unlock(&q->mmap_lock);
1102 | } |
1103 | return ret; |
1104 | } |
1105 | EXPORT_SYMBOL_GPL(vb2_core_create_bufs); |
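
/*
 * Direct-call sketch (hypothetical values; normally reached via the
 * VIDIOC_CREATE_BUFS ioctl helpers): add two single-plane buffers of 1 MiB
 * each on top of whatever is already allocated.
 *
 *	unsigned int count = 2;
 *	unsigned int sizes[1] = { 1024 * 1024 };
 *	int err = vb2_core_create_bufs(q, VB2_MEMORY_MMAP, 0, &count, 1, sizes);
 */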
1106 | |
1107 | void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) |
1108 | { |
1109 | if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) |
1110 | return NULL; |
1111 | |
	return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv);
}
1115 | EXPORT_SYMBOL_GPL(vb2_plane_vaddr); |
1116 | |
1117 | void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) |
1118 | { |
1119 | if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) |
1120 | return NULL; |
1121 | |
1122 | return call_ptr_memop(cookie, vb, vb->planes[plane_no].mem_priv); |
1123 | } |
1124 | EXPORT_SYMBOL_GPL(vb2_plane_cookie); |
1125 | |
1126 | void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) |
1127 | { |
1128 | struct vb2_queue *q = vb->vb2_queue; |
1129 | unsigned long flags; |
1130 | |
1131 | if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE)) |
1132 | return; |
1133 | |
1134 | if (WARN_ON(state != VB2_BUF_STATE_DONE && |
1135 | state != VB2_BUF_STATE_ERROR && |
1136 | state != VB2_BUF_STATE_QUEUED)) |
1137 | state = VB2_BUF_STATE_ERROR; |
1138 | |
1139 | #ifdef CONFIG_VIDEO_ADV_DEBUG |
1140 | /* |
1141 | * Although this is not a callback, it still does have to balance |
1142 | * with the buf_queue op. So update this counter manually. |
1143 | */ |
1144 | vb->cnt_buf_done++; |
1145 | #endif |
1146 | dprintk(q, 4, "done processing on buffer %d, state: %s\n" , |
1147 | vb->index, vb2_state_name(state)); |
1148 | |
1149 | if (state != VB2_BUF_STATE_QUEUED) |
1150 | __vb2_buf_mem_finish(vb); |
1151 | |
1152 | spin_lock_irqsave(&q->done_lock, flags); |
1153 | if (state == VB2_BUF_STATE_QUEUED) { |
1154 | vb->state = VB2_BUF_STATE_QUEUED; |
1155 | } else { |
1156 | /* Add the buffer to the done buffers list */ |
		list_add_tail(&vb->done_entry, &q->done_list);
		vb->state = state;
	}
	atomic_dec(&q->owned_by_drv_count);
1161 | |
1162 | if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) { |
		media_request_object_unbind(&vb->req_obj);
		media_request_object_put(&vb->req_obj);
1165 | } |
1166 | |
	spin_unlock_irqrestore(&q->done_lock, flags);
1168 | |
1169 | trace_vb2_buf_done(q, vb); |
1170 | |
1171 | switch (state) { |
1172 | case VB2_BUF_STATE_QUEUED: |
1173 | return; |
1174 | default: |
1175 | /* Inform any processes that may be waiting for buffers */ |
1176 | wake_up(&q->done_wq); |
1177 | break; |
1178 | } |
1179 | } |
1180 | EXPORT_SYMBOL_GPL(vb2_buffer_done); |
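
/*
 * Typical use from a driver's completion path (sketch with hypothetical
 * foo_* names): the buffer must be in the ACTIVE state, i.e. previously
 * handed to the driver via the buf_queue op.
 *
 *	static void foo_irq(struct foo_dev *dev)
 *	{
 *		struct vb2_buffer *vb = foo_take_active_buffer(dev);
 *
 *		vb->timestamp = ktime_get_ns();
 *		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
 *	}
 */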
1181 | |
1182 | void vb2_discard_done(struct vb2_queue *q) |
1183 | { |
1184 | struct vb2_buffer *vb; |
1185 | unsigned long flags; |
1186 | |
1187 | spin_lock_irqsave(&q->done_lock, flags); |
1188 | list_for_each_entry(vb, &q->done_list, done_entry) |
1189 | vb->state = VB2_BUF_STATE_ERROR; |
	spin_unlock_irqrestore(&q->done_lock, flags);
1191 | } |
1192 | EXPORT_SYMBOL_GPL(vb2_discard_done); |
1193 | |
1194 | /* |
1195 | * __prepare_mmap() - prepare an MMAP buffer |
1196 | */ |
1197 | static int __prepare_mmap(struct vb2_buffer *vb) |
1198 | { |
1199 | int ret = 0; |
1200 | |
1201 | ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, |
1202 | vb, vb->planes); |
1203 | return ret ? ret : call_vb_qop(vb, buf_prepare, vb); |
1204 | } |
1205 | |
1206 | /* |
1207 | * __prepare_userptr() - prepare a USERPTR buffer |
1208 | */ |
1209 | static int __prepare_userptr(struct vb2_buffer *vb) |
1210 | { |
1211 | struct vb2_plane planes[VB2_MAX_PLANES]; |
1212 | struct vb2_queue *q = vb->vb2_queue; |
1213 | void *mem_priv; |
1214 | unsigned int plane; |
1215 | int ret = 0; |
1216 | bool reacquired = vb->planes[0].mem_priv == NULL; |
1217 | |
1218 | memset(planes, 0, sizeof(planes[0]) * vb->num_planes); |
1219 | /* Copy relevant information provided by the userspace */ |
1220 | ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, |
1221 | vb, planes); |
1222 | if (ret) |
1223 | return ret; |
1224 | |
1225 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1226 | /* Skip the plane if already verified */ |
1227 | if (vb->planes[plane].m.userptr && |
1228 | vb->planes[plane].m.userptr == planes[plane].m.userptr |
1229 | && vb->planes[plane].length == planes[plane].length) |
1230 | continue; |
1231 | |
1232 | dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n" , |
1233 | plane); |
1234 | |
1235 | /* Check if the provided plane buffer is large enough */ |
1236 | if (planes[plane].length < vb->planes[plane].min_length) { |
1237 | dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n" , |
1238 | planes[plane].length, |
1239 | vb->planes[plane].min_length, |
1240 | plane); |
1241 | ret = -EINVAL; |
1242 | goto err; |
1243 | } |
1244 | |
1245 | /* Release previously acquired memory if present */ |
1246 | if (vb->planes[plane].mem_priv) { |
1247 | if (!reacquired) { |
1248 | reacquired = true; |
1249 | vb->copied_timestamp = 0; |
1250 | call_void_vb_qop(vb, buf_cleanup, vb); |
1251 | } |
1252 | call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv); |
1253 | } |
1254 | |
1255 | vb->planes[plane].mem_priv = NULL; |
1256 | vb->planes[plane].bytesused = 0; |
1257 | vb->planes[plane].length = 0; |
1258 | vb->planes[plane].m.userptr = 0; |
1259 | vb->planes[plane].data_offset = 0; |
1260 | |
1261 | /* Acquire each plane's memory */ |
1262 | mem_priv = call_ptr_memop(get_userptr, |
1263 | vb, |
1264 | q->alloc_devs[plane] ? : q->dev, |
1265 | planes[plane].m.userptr, |
1266 | planes[plane].length); |
		if (IS_ERR(mem_priv)) {
			dprintk(q, 1, "failed acquiring userspace memory for plane %d\n",
				plane);
			ret = PTR_ERR(mem_priv);
1271 | goto err; |
1272 | } |
1273 | vb->planes[plane].mem_priv = mem_priv; |
1274 | } |
1275 | |
1276 | /* |
1277 | * Now that everything is in order, copy relevant information |
1278 | * provided by userspace. |
1279 | */ |
1280 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1281 | vb->planes[plane].bytesused = planes[plane].bytesused; |
1282 | vb->planes[plane].length = planes[plane].length; |
1283 | vb->planes[plane].m.userptr = planes[plane].m.userptr; |
1284 | vb->planes[plane].data_offset = planes[plane].data_offset; |
1285 | } |
1286 | |
1287 | if (reacquired) { |
1288 | /* |
1289 | * One or more planes changed, so we must call buf_init to do |
1290 | * the driver-specific initialization on the newly acquired |
1291 | * buffer, if provided. |
1292 | */ |
1293 | ret = call_vb_qop(vb, buf_init, vb); |
1294 | if (ret) { |
1295 | dprintk(q, 1, "buffer initialization failed\n" ); |
1296 | goto err; |
1297 | } |
1298 | } |
1299 | |
1300 | ret = call_vb_qop(vb, buf_prepare, vb); |
1301 | if (ret) { |
1302 | dprintk(q, 1, "buffer preparation failed\n" ); |
1303 | call_void_vb_qop(vb, buf_cleanup, vb); |
1304 | goto err; |
1305 | } |
1306 | |
1307 | return 0; |
1308 | err: |
1309 | /* In case of errors, release planes that were already acquired */ |
1310 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1311 | if (vb->planes[plane].mem_priv) |
1312 | call_void_memop(vb, put_userptr, |
1313 | vb->planes[plane].mem_priv); |
1314 | vb->planes[plane].mem_priv = NULL; |
1315 | vb->planes[plane].m.userptr = 0; |
1316 | vb->planes[plane].length = 0; |
1317 | } |
1318 | |
1319 | return ret; |
1320 | } |
1321 | |
1322 | /* |
1323 | * __prepare_dmabuf() - prepare a DMABUF buffer |
1324 | */ |
1325 | static int __prepare_dmabuf(struct vb2_buffer *vb) |
1326 | { |
1327 | struct vb2_plane planes[VB2_MAX_PLANES]; |
1328 | struct vb2_queue *q = vb->vb2_queue; |
1329 | void *mem_priv; |
1330 | unsigned int plane; |
1331 | int ret = 0; |
1332 | bool reacquired = vb->planes[0].mem_priv == NULL; |
1333 | |
1334 | memset(planes, 0, sizeof(planes[0]) * vb->num_planes); |
1335 | /* Copy relevant information provided by the userspace */ |
1336 | ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, |
1337 | vb, planes); |
1338 | if (ret) |
1339 | return ret; |
1340 | |
1341 | for (plane = 0; plane < vb->num_planes; ++plane) { |
		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);

		if (IS_ERR_OR_NULL(dbuf)) {
			dprintk(q, 1, "invalid dmabuf fd for plane %d\n",
1346 | plane); |
1347 | ret = -EINVAL; |
1348 | goto err; |
1349 | } |
1350 | |
1351 | /* use DMABUF size if length is not provided */ |
1352 | if (planes[plane].length == 0) |
1353 | planes[plane].length = dbuf->size; |
1354 | |
1355 | if (planes[plane].length < vb->planes[plane].min_length) { |
1356 | dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n" , |
1357 | planes[plane].length, plane, |
1358 | vb->planes[plane].min_length); |
1359 | dma_buf_put(dmabuf: dbuf); |
1360 | ret = -EINVAL; |
1361 | goto err; |
1362 | } |
1363 | |
1364 | /* Skip the plane if already verified */ |
1365 | if (dbuf == vb->planes[plane].dbuf && |
1366 | vb->planes[plane].length == planes[plane].length) { |
			dma_buf_put(dbuf);
			continue;
		}

		dprintk(q, 3, "buffer for plane %d changed\n", plane);
1372 | |
1373 | if (!reacquired) { |
1374 | reacquired = true; |
1375 | vb->copied_timestamp = 0; |
1376 | call_void_vb_qop(vb, buf_cleanup, vb); |
1377 | } |
1378 | |
1379 | /* Release previously acquired memory if present */ |
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
1381 | vb->planes[plane].bytesused = 0; |
1382 | vb->planes[plane].length = 0; |
1383 | vb->planes[plane].m.fd = 0; |
1384 | vb->planes[plane].data_offset = 0; |
1385 | |
1386 | /* Acquire each plane's memory */ |
1387 | mem_priv = call_ptr_memop(attach_dmabuf, |
1388 | vb, |
1389 | q->alloc_devs[plane] ? : q->dev, |
1390 | dbuf, |
1391 | planes[plane].length); |
		if (IS_ERR(mem_priv)) {
			dprintk(q, 1, "failed to attach dmabuf\n");
			ret = PTR_ERR(mem_priv);
			dma_buf_put(dbuf);
1396 | goto err; |
1397 | } |
1398 | |
1399 | vb->planes[plane].dbuf = dbuf; |
1400 | vb->planes[plane].mem_priv = mem_priv; |
1401 | } |
1402 | |
1403 | /* |
1404 | * This pins the buffer(s) with dma_buf_map_attachment()). It's done |
1405 | * here instead just before the DMA, while queueing the buffer(s) so |
1406 | * userspace knows sooner rather than later if the dma-buf map fails. |
1407 | */ |
1408 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1409 | if (vb->planes[plane].dbuf_mapped) |
1410 | continue; |
1411 | |
1412 | ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv); |
1413 | if (ret) { |
1414 | dprintk(q, 1, "failed to map dmabuf for plane %d\n" , |
1415 | plane); |
1416 | goto err; |
1417 | } |
1418 | vb->planes[plane].dbuf_mapped = 1; |
1419 | } |
1420 | |
1421 | /* |
1422 | * Now that everything is in order, copy relevant information |
1423 | * provided by userspace. |
1424 | */ |
1425 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1426 | vb->planes[plane].bytesused = planes[plane].bytesused; |
1427 | vb->planes[plane].length = planes[plane].length; |
1428 | vb->planes[plane].m.fd = planes[plane].m.fd; |
1429 | vb->planes[plane].data_offset = planes[plane].data_offset; |
1430 | } |
1431 | |
1432 | if (reacquired) { |
1433 | /* |
1434 | * Call driver-specific initialization on the newly acquired buffer, |
1435 | * if provided. |
1436 | */ |
1437 | ret = call_vb_qop(vb, buf_init, vb); |
1438 | if (ret) { |
1439 | dprintk(q, 1, "buffer initialization failed\n" ); |
1440 | goto err; |
1441 | } |
1442 | } |
1443 | |
1444 | ret = call_vb_qop(vb, buf_prepare, vb); |
1445 | if (ret) { |
1446 | dprintk(q, 1, "buffer preparation failed\n" ); |
1447 | call_void_vb_qop(vb, buf_cleanup, vb); |
1448 | goto err; |
1449 | } |
1450 | |
1451 | return 0; |
1452 | err: |
1453 | /* In case of errors, release planes that were already acquired */ |
1454 | __vb2_buf_dmabuf_put(vb); |
1455 | |
1456 | return ret; |
1457 | } |
1458 | |
1459 | /* |
1460 | * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing |
1461 | */ |
1462 | static void __enqueue_in_driver(struct vb2_buffer *vb) |
1463 | { |
1464 | struct vb2_queue *q = vb->vb2_queue; |
1465 | |
1466 | vb->state = VB2_BUF_STATE_ACTIVE; |
	atomic_inc(&q->owned_by_drv_count);
1468 | |
1469 | trace_vb2_buf_queue(q, vb); |
1470 | |
1471 | call_void_vb_qop(vb, buf_queue, vb); |
1472 | } |
1473 | |
1474 | static int __buf_prepare(struct vb2_buffer *vb) |
1475 | { |
1476 | struct vb2_queue *q = vb->vb2_queue; |
1477 | enum vb2_buffer_state orig_state = vb->state; |
1478 | int ret; |
1479 | |
1480 | if (q->error) { |
1481 | dprintk(q, 1, "fatal error occurred on queue\n" ); |
1482 | return -EIO; |
1483 | } |
1484 | |
1485 | if (vb->prepared) |
1486 | return 0; |
1487 | WARN_ON(vb->synced); |
1488 | |
1489 | if (q->is_output) { |
1490 | ret = call_vb_qop(vb, buf_out_validate, vb); |
1491 | if (ret) { |
1492 | dprintk(q, 1, "buffer validation failed\n" ); |
1493 | return ret; |
1494 | } |
1495 | } |
1496 | |
1497 | vb->state = VB2_BUF_STATE_PREPARING; |
1498 | |
1499 | switch (q->memory) { |
1500 | case VB2_MEMORY_MMAP: |
1501 | ret = __prepare_mmap(vb); |
1502 | break; |
1503 | case VB2_MEMORY_USERPTR: |
1504 | ret = __prepare_userptr(vb); |
1505 | break; |
1506 | case VB2_MEMORY_DMABUF: |
1507 | ret = __prepare_dmabuf(vb); |
1508 | break; |
1509 | default: |
1510 | WARN(1, "Invalid queue type\n" ); |
1511 | ret = -EINVAL; |
1512 | break; |
1513 | } |
1514 | |
1515 | if (ret) { |
1516 | dprintk(q, 1, "buffer preparation failed: %d\n" , ret); |
1517 | vb->state = orig_state; |
1518 | return ret; |
1519 | } |
1520 | |
1521 | __vb2_buf_mem_prepare(vb); |
1522 | vb->prepared = 1; |
1523 | vb->state = orig_state; |
1524 | |
1525 | return 0; |
1526 | } |
1527 | |
1528 | static int vb2_req_prepare(struct media_request_object *obj) |
1529 | { |
1530 | struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); |
1531 | int ret; |
1532 | |
1533 | if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST)) |
1534 | return -EINVAL; |
1535 | |
1536 | mutex_lock(vb->vb2_queue->lock); |
1537 | ret = __buf_prepare(vb); |
	mutex_unlock(vb->vb2_queue->lock);
1539 | return ret; |
1540 | } |
1541 | |
1542 | static void __vb2_dqbuf(struct vb2_buffer *vb); |
1543 | |
1544 | static void vb2_req_unprepare(struct media_request_object *obj) |
1545 | { |
1546 | struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); |
1547 | |
1548 | mutex_lock(vb->vb2_queue->lock); |
1549 | __vb2_dqbuf(vb); |
1550 | vb->state = VB2_BUF_STATE_IN_REQUEST; |
	mutex_unlock(vb->vb2_queue->lock);
1552 | WARN_ON(!vb->req_obj.req); |
1553 | } |
1554 | |
1555 | static void vb2_req_queue(struct media_request_object *obj) |
1556 | { |
1557 | struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); |
1558 | int err; |
1559 | |
1560 | mutex_lock(vb->vb2_queue->lock); |
1561 | /* |
1562 | * There is no method to propagate an error from vb2_core_qbuf(), |
1563 | * so if this returns a non-0 value, then WARN. |
1564 | * |
1565 | * The only exception is -EIO which is returned if q->error is |
1566 | * set. We just ignore that, and expect this will be caught the |
1567 | * next time vb2_req_prepare() is called. |
1568 | */ |
	err = vb2_core_qbuf(vb->vb2_queue, vb, NULL, NULL);
	WARN_ON_ONCE(err && err != -EIO);
	mutex_unlock(vb->vb2_queue->lock);
1572 | } |
1573 | |
1574 | static void vb2_req_unbind(struct media_request_object *obj) |
1575 | { |
1576 | struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); |
1577 | |
1578 | if (vb->state == VB2_BUF_STATE_IN_REQUEST) |
1579 | call_void_bufop(vb->vb2_queue, init_buffer, vb); |
1580 | } |
1581 | |
1582 | static void vb2_req_release(struct media_request_object *obj) |
1583 | { |
1584 | struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); |
1585 | |
1586 | if (vb->state == VB2_BUF_STATE_IN_REQUEST) { |
1587 | vb->state = VB2_BUF_STATE_DEQUEUED; |
1588 | if (vb->request) |
			media_request_put(vb->request);
1590 | vb->request = NULL; |
1591 | } |
1592 | } |
1593 | |
1594 | static const struct media_request_object_ops vb2_core_req_ops = { |
1595 | .prepare = vb2_req_prepare, |
1596 | .unprepare = vb2_req_unprepare, |
1597 | .queue = vb2_req_queue, |
1598 | .unbind = vb2_req_unbind, |
1599 | .release = vb2_req_release, |
1600 | }; |
1601 | |
1602 | bool vb2_request_object_is_buffer(struct media_request_object *obj) |
1603 | { |
1604 | return obj->ops == &vb2_core_req_ops; |
1605 | } |
1606 | EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer); |
1607 | |
1608 | unsigned int vb2_request_buffer_cnt(struct media_request *req) |
1609 | { |
1610 | struct media_request_object *obj; |
1611 | unsigned long flags; |
1612 | unsigned int buffer_cnt = 0; |
1613 | |
1614 | spin_lock_irqsave(&req->lock, flags); |
1615 | list_for_each_entry(obj, &req->objects, list) |
1616 | if (vb2_request_object_is_buffer(obj)) |
1617 | buffer_cnt++; |
	spin_unlock_irqrestore(&req->lock, flags);
1619 | |
1620 | return buffer_cnt; |
1621 | } |
1622 | EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt); |
1623 | |
1624 | int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb) |
1625 | { |
1626 | int ret; |
1627 | |
1628 | if (vb->state != VB2_BUF_STATE_DEQUEUED) { |
1629 | dprintk(q, 1, "invalid buffer state %s\n" , |
1630 | vb2_state_name(vb->state)); |
1631 | return -EINVAL; |
1632 | } |
1633 | if (vb->prepared) { |
1634 | dprintk(q, 1, "buffer already prepared\n" ); |
1635 | return -EINVAL; |
1636 | } |
1637 | |
1638 | ret = __buf_prepare(vb); |
1639 | if (ret) |
1640 | return ret; |
1641 | |
1642 | /* Fill buffer information for the userspace */ |
1643 | call_void_bufop(q, fill_user_buffer, vb, pb); |
1644 | |
1645 | dprintk(q, 2, "prepare of buffer %d succeeded\n" , vb->index); |
1646 | |
1647 | return 0; |
1648 | } |
1649 | EXPORT_SYMBOL_GPL(vb2_core_prepare_buf); |
1650 | |
1651 | /* |
1652 | * vb2_start_streaming() - Attempt to start streaming. |
1653 | * @q: videobuf2 queue |
1654 | * |
1655 | * Attempt to start streaming. When this function is called there must be |
1656 | * at least q->min_queued_buffers queued up (i.e. the minimum |
1657 | * number of buffers required for the DMA engine to function). If the |
1658 | * @start_streaming op fails it is supposed to return all the driver-owned |
1659 | * buffers back to vb2 in state QUEUED. Check if that happened and if |
1660 | * not warn and reclaim them forcefully. |
1661 | */ |
1662 | static int vb2_start_streaming(struct vb2_queue *q) |
1663 | { |
1664 | struct vb2_buffer *vb; |
1665 | int ret; |
1666 | |
1667 | /* |
1668 | * If any buffers were queued before streamon, |
1669 | * we can now pass them to driver for processing. |
1670 | */ |
1671 | list_for_each_entry(vb, &q->queued_list, queued_entry) |
1672 | __enqueue_in_driver(vb); |
1673 | |
1674 | /* Tell the driver to start streaming */ |
1675 | q->start_streaming_called = 1; |
1676 | ret = call_qop(q, start_streaming, q, |
1677 | atomic_read(&q->owned_by_drv_count)); |
1678 | if (!ret) |
1679 | return 0; |
1680 | |
1681 | q->start_streaming_called = 0; |
1682 | |
1683 | dprintk(q, 1, "driver refused to start streaming\n" ); |
1684 | /* |
1685 | * If you see this warning, then the driver isn't cleaning up properly |
1686 | * after a failed start_streaming(). See the start_streaming() |
1687 | * documentation in videobuf2-core.h for more information how buffers |
1688 | * should be returned to vb2 in start_streaming(). |
1689 | */ |
1690 | if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { |
1691 | unsigned i; |
1692 | |
1693 | /* |
1694 | * Forcefully reclaim buffers if the driver did not |
1695 | * correctly return them to vb2. |
1696 | */ |
1697 | for (i = 0; i < vb2_get_num_buffers(q); ++i) { |
			vb = vb2_get_buffer(q, i);
1699 | |
1700 | if (!vb) |
1701 | continue; |
1702 | |
1703 | if (vb->state == VB2_BUF_STATE_ACTIVE) |
1704 | vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED); |
1705 | } |
1706 | /* Must be zero now */ |
1707 | WARN_ON(atomic_read(&q->owned_by_drv_count)); |
1708 | } |
1709 | /* |
1710 | * If done_list is not empty, then start_streaming() didn't call |
1711 | * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or |
1712 | * STATE_DONE. |
1713 | */ |
1714 | WARN_ON(!list_empty(&q->done_list)); |
1715 | return ret; |
1716 | } |
1717 | |
1718 | int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb, |
1719 | struct media_request *req) |
1720 | { |
1721 | enum vb2_buffer_state orig_state; |
1722 | int ret; |
1723 | |
	if (q->error) {
		dprintk(q, 1, "fatal error occurred on queue\n");
		return -EIO;
	}

	if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
	    q->requires_requests) {
		dprintk(q, 1, "qbuf requires a request\n");
		return -EBADR;
	}

	if ((req && q->uses_qbuf) ||
	    (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
	     q->uses_requests)) {
		dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n");
		return -EBUSY;
	}
1741 | |
1742 | if (req) { |
1743 | int ret; |
1744 | |
1745 | q->uses_requests = 1; |
		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
			dprintk(q, 1, "buffer %d not in dequeued state\n",
				vb->index);
			return -EINVAL;
		}

		if (q->is_output && !vb->prepared) {
			ret = call_vb_qop(vb, buf_out_validate, vb);
			if (ret) {
				dprintk(q, 1, "buffer validation failed\n");
				return ret;
			}
		}

		media_request_object_init(&vb->req_obj);

		/* Make sure the request is in a safe state for updating. */
		ret = media_request_lock_for_update(req);
		if (ret)
			return ret;
		ret = media_request_object_bind(req, &vb2_core_req_ops,
						q, true, &vb->req_obj);
1768 | media_request_unlock_for_update(req); |
1769 | if (ret) |
1770 | return ret; |
1771 | |
1772 | vb->state = VB2_BUF_STATE_IN_REQUEST; |
1773 | |
1774 | /* |
1775 | * Increment the refcount and store the request. |
1776 | * The request refcount is decremented again when the |
1777 | * buffer is dequeued. This is to prevent vb2_buffer_done() |
1778 | * from freeing the request from interrupt context, which can |
1779 | * happen if the application closed the request fd after |
1780 | * queueing the request. |
1781 | */ |
1782 | media_request_get(req); |
1783 | vb->request = req; |
1784 | |
1785 | /* Fill buffer information for the userspace */ |
1786 | if (pb) { |
1787 | call_void_bufop(q, copy_timestamp, vb, pb); |
1788 | call_void_bufop(q, fill_user_buffer, vb, pb); |
1789 | } |
1790 | |
1791 | dprintk(q, 2, "qbuf of buffer %d succeeded\n" , vb->index); |
1792 | return 0; |
1793 | } |
1794 | |
1795 | if (vb->state != VB2_BUF_STATE_IN_REQUEST) |
1796 | q->uses_qbuf = 1; |
1797 | |
1798 | switch (vb->state) { |
1799 | case VB2_BUF_STATE_DEQUEUED: |
1800 | case VB2_BUF_STATE_IN_REQUEST: |
1801 | if (!vb->prepared) { |
1802 | ret = __buf_prepare(vb); |
1803 | if (ret) |
1804 | return ret; |
1805 | } |
1806 | break; |
1807 | case VB2_BUF_STATE_PREPARING: |
1808 | dprintk(q, 1, "buffer still being prepared\n" ); |
1809 | return -EINVAL; |
1810 | default: |
1811 | dprintk(q, 1, "invalid buffer state %s\n" , |
1812 | vb2_state_name(vb->state)); |
1813 | return -EINVAL; |
1814 | } |
1815 | |
1816 | /* |
1817 | * Add to the queued buffers list, a buffer will stay on it until |
1818 | * dequeued in dqbuf. |
1819 | */ |
1820 | orig_state = vb->state; |
	list_add_tail(&vb->queued_entry, &q->queued_list);
1822 | q->queued_count++; |
1823 | q->waiting_for_buffers = false; |
1824 | vb->state = VB2_BUF_STATE_QUEUED; |
1825 | |
1826 | if (pb) |
1827 | call_void_bufop(q, copy_timestamp, vb, pb); |
1828 | |
1829 | trace_vb2_qbuf(q, vb); |
1830 | |
1831 | /* |
1832 | * If already streaming, give the buffer to driver for processing. |
1833 | * If not, the buffer will be given to driver on next streamon. |
1834 | */ |
1835 | if (q->start_streaming_called) |
1836 | __enqueue_in_driver(vb); |
1837 | |
1838 | /* Fill buffer information for the userspace */ |
1839 | if (pb) |
1840 | call_void_bufop(q, fill_user_buffer, vb, pb); |
1841 | |
1842 | /* |
1843 | * If streamon has been called, and we haven't yet called |
1844 | * start_streaming() since not enough buffers were queued, and |
1845 | * we now have reached the minimum number of queued buffers, |
1846 | * then we can finally call start_streaming(). |
1847 | */ |
1848 | if (q->streaming && !q->start_streaming_called && |
1849 | q->queued_count >= q->min_queued_buffers) { |
1850 | ret = vb2_start_streaming(q); |
1851 | if (ret) { |
1852 | /* |
1853 | * Since vb2_core_qbuf will return with an error, |
1854 | * we should return it to state DEQUEUED since |
1855 | * the error indicates that the buffer wasn't queued. |
1856 | */ |
			list_del(&vb->queued_entry);
1858 | q->queued_count--; |
1859 | vb->state = orig_state; |
1860 | return ret; |
1861 | } |
1862 | } |
1863 | |
1864 | dprintk(q, 2, "qbuf of buffer %d succeeded\n" , vb->index); |
1865 | return 0; |
1866 | } |
1867 | EXPORT_SYMBOL_GPL(vb2_core_qbuf); |
1868 | |
1869 | /* |
1870 | * __vb2_wait_for_done_vb() - wait for a buffer to become available |
1871 | * for dequeuing |
1872 | * |
1873 | * Will sleep if required for nonblocking == false. |
1874 | */ |
1875 | static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) |
1876 | { |
1877 | /* |
1878 | * All operations on vb_done_list are performed under done_lock |
1879 | * spinlock protection. However, buffers may be removed from |
1880 | * it and returned to userspace only while holding both driver's |
1881 | * lock and the done_lock spinlock. Thus we can be sure that as |
1882 | * long as we hold the driver's lock, the list will remain not |
1883 | * empty if list_empty() check succeeds. |
1884 | */ |
1885 | |
1886 | for (;;) { |
1887 | int ret; |
1888 | |
		if (q->waiting_in_dqbuf) {
			dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
			return -EBUSY;
		}

		if (!q->streaming) {
			dprintk(q, 1, "streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (q->error) {
			dprintk(q, 1, "Queue in error state, will not wait for buffers\n");
			return -EIO;
		}

		if (q->last_buffer_dequeued) {
			dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n");
			return -EPIPE;
		}

		if (!list_empty(&q->done_list)) {
			/*
			 * Found a buffer that we were waiting for.
			 */
			break;
		}

		if (nonblocking) {
			dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n");
			return -EAGAIN;
1919 | } |
1920 | |
1921 | q->waiting_in_dqbuf = 1; |
1922 | /* |
1923 | * We are streaming and blocking, wait for another buffer to |
1924 | * become ready or for streamoff. Driver's lock is released to |
1925 | * allow streamoff or qbuf to be called while waiting. |
1926 | */ |
1927 | call_void_qop(q, wait_prepare, q); |
1928 | |
1929 | /* |
1930 | * All locks have been released, it is safe to sleep now. |
1931 | */ |
1932 | dprintk(q, 3, "will sleep waiting for buffers\n" ); |
1933 | ret = wait_event_interruptible(q->done_wq, |
1934 | !list_empty(&q->done_list) || !q->streaming || |
1935 | q->error); |
1936 | |
1937 | /* |
1938 | * We need to reevaluate both conditions again after reacquiring |
1939 | * the locks or return an error if one occurred. |
1940 | */ |
1941 | call_void_qop(q, wait_finish, q); |
1942 | q->waiting_in_dqbuf = 0; |
1943 | if (ret) { |
1944 | dprintk(q, 1, "sleep was interrupted\n" ); |
1945 | return ret; |
1946 | } |
1947 | } |
1948 | return 0; |
1949 | } |
1950 | |
1951 | /* |
1952 | * __vb2_get_done_vb() - get a buffer ready for dequeuing |
1953 | * |
1954 | * Will sleep if required for nonblocking == false. |
1955 | */ |
1956 | static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, |
1957 | void *pb, int nonblocking) |
1958 | { |
1959 | unsigned long flags; |
1960 | int ret = 0; |
1961 | |
1962 | /* |
1963 | * Wait for at least one buffer to become available on the done_list. |
1964 | */ |
1965 | ret = __vb2_wait_for_done_vb(q, nonblocking); |
1966 | if (ret) |
1967 | return ret; |
1968 | |
1969 | /* |
1970 | * Driver's lock has been held since we last verified that done_list |
1971 | * is not empty, so no need for another list_empty(done_list) check. |
1972 | */ |
1973 | spin_lock_irqsave(&q->done_lock, flags); |
1974 | *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry); |
1975 | /* |
1976 | * Only remove the buffer from done_list if all planes can be |
1977 | * handled. Some cases such as V4L2 file I/O and DVB have pb |
1978 | * == NULL; skip the check then as there's nothing to verify. |
1979 | */ |
1980 | if (pb) |
1981 | ret = call_bufop(q, verify_planes_array, *vb, pb); |
	if (!ret)
		list_del(&(*vb)->done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);
1985 | |
1986 | return ret; |
1987 | } |
1988 | |
1989 | int vb2_wait_for_all_buffers(struct vb2_queue *q) |
1990 | { |
1991 | if (!q->streaming) { |
1992 | dprintk(q, 1, "streaming off, will not wait for buffers\n" ); |
1993 | return -EINVAL; |
1994 | } |
1995 | |
1996 | if (q->start_streaming_called) |
1997 | wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count)); |
1998 | return 0; |
1999 | } |
2000 | EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers); |
2001 | |
2002 | /* |
2003 | * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state |
2004 | */ |
2005 | static void __vb2_dqbuf(struct vb2_buffer *vb) |
2006 | { |
2007 | struct vb2_queue *q = vb->vb2_queue; |
2008 | |
2009 | /* nothing to do if the buffer is already dequeued */ |
2010 | if (vb->state == VB2_BUF_STATE_DEQUEUED) |
2011 | return; |
2012 | |
2013 | vb->state = VB2_BUF_STATE_DEQUEUED; |
2014 | |
2015 | call_void_bufop(q, init_buffer, vb); |
2016 | } |
2017 | |
2018 | int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, |
2019 | bool nonblocking) |
2020 | { |
2021 | struct vb2_buffer *vb = NULL; |
2022 | int ret; |
2023 | |
	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
2025 | if (ret < 0) |
2026 | return ret; |
2027 | |
	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(q, 3, "returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(q, 3, "returning done buffer with errors\n");
		break;
	default:
		dprintk(q, 1, "invalid buffer state %s\n",
			vb2_state_name(vb->state));
		return -EINVAL;
	}
2040 | |
2041 | call_void_vb_qop(vb, buf_finish, vb); |
2042 | vb->prepared = 0; |
2043 | |
2044 | if (pindex) |
2045 | *pindex = vb->index; |
2046 | |
2047 | /* Fill buffer information for the userspace */ |
2048 | if (pb) |
2049 | call_void_bufop(q, fill_user_buffer, vb, pb); |
2050 | |
2051 | /* Remove from vb2 queue */ |
	list_del(&vb->queued_entry);
2053 | q->queued_count--; |
2054 | |
2055 | trace_vb2_dqbuf(q, vb); |
2056 | |
2057 | /* go back to dequeued state */ |
2058 | __vb2_dqbuf(vb); |
2059 | |
	if (WARN_ON(vb->req_obj.req)) {
		media_request_object_unbind(&vb->req_obj);
		media_request_object_put(&vb->req_obj);
	}
	if (vb->request)
		media_request_put(vb->request);
	vb->request = NULL;

	dprintk(q, 2, "dqbuf of buffer %d, state: %s\n",
		vb->index, vb2_state_name(vb->state));

	return 0;
}
2074 | EXPORT_SYMBOL_GPL(vb2_core_dqbuf); |
2075 | |
2076 | /* |
2077 | * __vb2_queue_cancel() - cancel and stop (pause) streaming |
2078 | * |
2079 | * Removes all queued buffers from driver's queue and all buffers queued by |
2080 | * userspace from vb2's queue. Returns to state after reqbufs. |
2081 | */ |
2082 | static void __vb2_queue_cancel(struct vb2_queue *q) |
2083 | { |
2084 | unsigned int i; |
2085 | |
2086 | /* |
2087 | * Tell driver to stop all transactions and release all queued |
2088 | * buffers. |
2089 | */ |
2090 | if (q->start_streaming_called) |
2091 | call_void_qop(q, stop_streaming, q); |
2092 | |
2093 | if (q->streaming) |
2094 | call_void_qop(q, unprepare_streaming, q); |
2095 | |
2096 | /* |
2097 | * If you see this warning, then the driver isn't cleaning up properly |
2098 | * in stop_streaming(). See the stop_streaming() documentation in |
	 * videobuf2-core.h for more information on how buffers should be returned
2100 | * to vb2 in stop_streaming(). |
2101 | */ |
2102 | if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { |
		for (i = 0; i < vb2_get_num_buffers(q); i++) {
			struct vb2_buffer *vb = vb2_get_buffer(q, i);

			if (!vb)
				continue;

			if (vb->state == VB2_BUF_STATE_ACTIVE) {
				pr_warn("driver bug: stop_streaming operation is leaving buffer %u in active state\n",
					vb->index);
2112 | vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); |
2113 | } |
2114 | } |
2115 | /* Must be zero now */ |
2116 | WARN_ON(atomic_read(&q->owned_by_drv_count)); |
2117 | } |
2118 | |
2119 | q->streaming = 0; |
2120 | q->start_streaming_called = 0; |
2121 | q->queued_count = 0; |
2122 | q->error = 0; |
2123 | q->uses_requests = 0; |
2124 | q->uses_qbuf = 0; |
2125 | |
2126 | /* |
2127 | * Remove all buffers from vb2's list... |
2128 | */ |
2129 | INIT_LIST_HEAD(list: &q->queued_list); |
2130 | /* |
2131 | * ...and done list; userspace will not receive any buffers it |
2132 | * has not already dequeued before initiating cancel. |
2133 | */ |
2134 | INIT_LIST_HEAD(list: &q->done_list); |
2135 | atomic_set(v: &q->owned_by_drv_count, i: 0); |
2136 | wake_up_all(&q->done_wq); |
2137 | |
2138 | /* |
2139 | * Reinitialize all buffers for next use. |
2140 | * Make sure to call buf_finish for any queued buffers. Normally |
2141 | * that's done in dqbuf, but that's not going to happen when we |
2142 | * cancel the whole queue. Note: this code belongs here, not in |
2143 | * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical |
2144 | * call to __fill_user_buffer() after buf_finish(). That order can't |
2145 | * be changed, so we can't move the buf_finish() to __vb2_dqbuf(). |
2146 | */ |
2147 | for (i = 0; i < vb2_get_num_buffers(q); i++) { |
2148 | struct vb2_buffer *vb; |
2149 | struct media_request *req; |
2150 | |
		vb = vb2_get_buffer(q, i);
2152 | if (!vb) |
2153 | continue; |
2154 | |
2155 | req = vb->req_obj.req; |
2156 | /* |
2157 | * If a request is associated with this buffer, then |
2158 | * call buf_request_cancel() to give the driver to complete() |
2159 | * related request objects. Otherwise those objects would |
2160 | * never complete. |
2161 | */ |
2162 | if (req) { |
2163 | enum media_request_state state; |
2164 | unsigned long flags; |
2165 | |
2166 | spin_lock_irqsave(&req->lock, flags); |
2167 | state = req->state; |
			spin_unlock_irqrestore(&req->lock, flags);
2169 | |
2170 | if (state == MEDIA_REQUEST_STATE_QUEUED) |
2171 | call_void_vb_qop(vb, buf_request_complete, vb); |
2172 | } |
2173 | |
2174 | __vb2_buf_mem_finish(vb); |
2175 | |
2176 | if (vb->prepared) { |
2177 | call_void_vb_qop(vb, buf_finish, vb); |
2178 | vb->prepared = 0; |
2179 | } |
2180 | __vb2_dqbuf(vb); |
2181 | |
		if (vb->req_obj.req) {
			media_request_object_unbind(&vb->req_obj);
			media_request_object_put(&vb->req_obj);
		}
		if (vb->request)
			media_request_put(vb->request);
2188 | vb->request = NULL; |
2189 | vb->copied_timestamp = 0; |
2190 | } |
2191 | } |
2192 | |
2193 | int vb2_core_streamon(struct vb2_queue *q, unsigned int type) |
2194 | { |
2195 | unsigned int q_num_bufs = vb2_get_num_buffers(q); |
2196 | int ret; |
2197 | |
	if (type != q->type) {
		dprintk(q, 1, "invalid stream type\n");
		return -EINVAL;
	}

	if (q->streaming) {
		dprintk(q, 3, "already streaming\n");
		return 0;
	}

	if (!q_num_bufs) {
		dprintk(q, 1, "no buffers have been allocated\n");
		return -EINVAL;
	}

	if (q_num_bufs < q->min_queued_buffers) {
		dprintk(q, 1, "need at least %u queued buffers\n",
			q->min_queued_buffers);
		return -EINVAL;
	}
2218 | |
2219 | ret = call_qop(q, prepare_streaming, q); |
2220 | if (ret) |
2221 | return ret; |
2222 | |
2223 | /* |
2224 | * Tell driver to start streaming provided sufficient buffers |
2225 | * are available. |
2226 | */ |
2227 | if (q->queued_count >= q->min_queued_buffers) { |
2228 | ret = vb2_start_streaming(q); |
2229 | if (ret) |
2230 | goto unprepare; |
2231 | } |
2232 | |
2233 | q->streaming = 1; |
2234 | |
2235 | dprintk(q, 3, "successful\n" ); |
2236 | return 0; |
2237 | |
2238 | unprepare: |
2239 | call_void_qop(q, unprepare_streaming, q); |
2240 | return ret; |
2241 | } |
2242 | EXPORT_SYMBOL_GPL(vb2_core_streamon); |
2243 | |
2244 | void vb2_queue_error(struct vb2_queue *q) |
2245 | { |
2246 | q->error = 1; |
2247 | |
2248 | wake_up_all(&q->done_wq); |
2249 | } |
2250 | EXPORT_SYMBOL_GPL(vb2_queue_error); |
2251 | |
2252 | int vb2_core_streamoff(struct vb2_queue *q, unsigned int type) |
2253 | { |
2254 | if (type != q->type) { |
2255 | dprintk(q, 1, "invalid stream type\n" ); |
2256 | return -EINVAL; |
2257 | } |
2258 | |
2259 | /* |
2260 | * Cancel will pause streaming and remove all buffers from the driver |
2261 | * and vb2, effectively returning control over them to userspace. |
2262 | * |
2263 | * Note that we do this even if q->streaming == 0: if you prepare or |
2264 | * queue buffers, and then call streamoff without ever having called |
2265 | * streamon, you would still expect those buffers to be returned to |
2266 | * their normal dequeued state. |
2267 | */ |
2268 | __vb2_queue_cancel(q); |
2269 | q->waiting_for_buffers = !q->is_output; |
2270 | q->last_buffer_dequeued = false; |
2271 | |
2272 | dprintk(q, 3, "successful\n" ); |
2273 | return 0; |
2274 | } |
2275 | EXPORT_SYMBOL_GPL(vb2_core_streamoff); |
2276 | |
2277 | /* |
2278 | * __find_plane_by_offset() - find plane associated with the given offset |
2279 | */ |
2280 | static int __find_plane_by_offset(struct vb2_queue *q, unsigned long offset, |
2281 | struct vb2_buffer **vb, unsigned int *plane) |
2282 | { |
2283 | unsigned int buffer; |
2284 | |
2285 | /* |
2286 | * Sanity checks to ensure the lock is held, MEMORY_MMAP is |
2287 | * used and fileio isn't active. |
2288 | */ |
2289 | lockdep_assert_held(&q->mmap_lock); |
2290 | |
	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(q, 1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
2300 | |
2301 | /* Get buffer and plane from the offset */ |
2302 | buffer = (offset >> PLANE_INDEX_SHIFT) & BUFFER_INDEX_MASK; |
2303 | *plane = (offset >> PAGE_SHIFT) & PLANE_INDEX_MASK; |
2304 | |
	*vb = vb2_get_buffer(q, buffer);
2306 | if (!*vb) |
2307 | return -EINVAL; |
2308 | if (*plane >= (*vb)->num_planes) |
2309 | return -EINVAL; |
2310 | |
2311 | return 0; |
2312 | } |
2313 | |
2314 | int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type, |
2315 | struct vb2_buffer *vb, unsigned int plane, unsigned int flags) |
2316 | { |
2317 | struct vb2_plane *vb_plane; |
2318 | int ret; |
2319 | struct dma_buf *dbuf; |
2320 | |
	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(q, 1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (!q->mem_ops->get_dmabuf) {
		dprintk(q, 1, "queue does not support DMA buffer exporting\n");
		return -EINVAL;
	}

	if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
		dprintk(q, 1, "queue supports only O_CLOEXEC and access mode flags\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(q, 1, "invalid buffer type\n");
		return -EINVAL;
	}

	if (plane >= vb->num_planes) {
		dprintk(q, 1, "buffer plane out of range\n");
		return -EINVAL;
	}

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "expbuf: file io in progress\n");
		return -EBUSY;
	}
2350 | |
2351 | vb_plane = &vb->planes[plane]; |
2352 | |
	dbuf = call_ptr_memop(get_dmabuf,
			      vb,
			      vb_plane->mem_priv,
			      flags & O_ACCMODE);
	if (IS_ERR_OR_NULL(dbuf)) {
		dprintk(q, 1, "failed to export buffer %d, plane %d\n",
			vb->index, plane);
		return -EINVAL;
	}

	ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
	if (ret < 0) {
		dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n",
			vb->index, plane, ret);
		dma_buf_put(dbuf);
		return ret;
	}

	dprintk(q, 3, "buffer %d, plane %d exported as %d descriptor\n",
		vb->index, plane, ret);
2373 | *fd = ret; |
2374 | |
2375 | return 0; |
2376 | } |
2377 | EXPORT_SYMBOL_GPL(vb2_core_expbuf); |
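
/*
 * For reference, a sketch of the matching userspace call (V4L2 uAPI, shown
 * here only as an illustration; use_dmabuf_fd() is hypothetical). After
 * allocating MMAP buffers with VIDIOC_REQBUFS, a single plane is exported
 * as a DMA-BUF file descriptor:
 *
 *	struct v4l2_exportbuffer expbuf = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.index = 0,
 *		.plane = 0,
 *		.flags = O_CLOEXEC | O_RDWR,
 *	};
 *
 *	if (ioctl(fd, VIDIOC_EXPBUF, &expbuf) == 0)
 *		use_dmabuf_fd(expbuf.fd);
 */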
2378 | |
2379 | int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) |
2380 | { |
2381 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; |
2382 | struct vb2_buffer *vb; |
2383 | unsigned int plane = 0; |
2384 | int ret; |
2385 | unsigned long length; |
2386 | |
2387 | /* |
2388 | * Check memory area access mode. |
2389 | */ |
	if (!(vma->vm_flags & VM_SHARED)) {
		dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n");
		return -EINVAL;
	}
	if (q->is_output) {
		if (!(vma->vm_flags & VM_WRITE)) {
			dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n");
			return -EINVAL;
		}
	} else {
		if (!(vma->vm_flags & VM_READ)) {
			dprintk(q, 1, "invalid vma flags, VM_READ needed\n");
			return -EINVAL;
		}
	}
2405 | |
2406 | mutex_lock(&q->mmap_lock); |
2407 | |
2408 | /* |
2409 | * Find the plane corresponding to the offset passed by userspace. This |
	 * will return an error if the queue is not MEMORY_MMAP or if file
	 * I/O is in progress.
2411 | */ |
	ret = __find_plane_by_offset(q, offset, &vb, &plane);
2413 | if (ret) |
2414 | goto unlock; |
2415 | |
2416 | /* |
2417 | * MMAP requires page_aligned buffers. |
2418 | * The buffer length was page_aligned at __vb2_buf_mem_alloc(), |
2419 | * so, we need to do the same here. |
2420 | */ |
2421 | length = PAGE_ALIGN(vb->planes[plane].length); |
2422 | if (length < (vma->vm_end - vma->vm_start)) { |
2423 | dprintk(q, 1, |
2424 | "MMAP invalid, as it would overflow buffer length\n" ); |
2425 | ret = -EINVAL; |
2426 | goto unlock; |
2427 | } |
2428 | |
2429 | /* |
2430 | * vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer, |
	 * not as an in-buffer offset. We always want to mmap a whole buffer
2432 | * from its beginning. |
2433 | */ |
2434 | vma->vm_pgoff = 0; |
2435 | |
2436 | ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma); |
2437 | |
2438 | unlock: |
	mutex_unlock(&q->mmap_lock);
2440 | if (ret) |
2441 | return ret; |
2442 | |
2443 | dprintk(q, 3, "buffer %u, plane %d successfully mapped\n" , vb->index, plane); |
2444 | return 0; |
2445 | } |
2446 | EXPORT_SYMBOL_GPL(vb2_mmap); |
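
/*
 * The matching userspace sequence, sketched for illustration (V4L2 uAPI):
 * the offset returned by VIDIOC_QUERYBUF is the cookie described above and
 * must be passed to mmap() unchanged:
 *
 *	struct v4l2_buffer buf = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *		.index = 0,
 *	};
 *
 *	ioctl(fd, VIDIOC_QUERYBUF, &buf);
 *	void *mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, buf.m.offset);
 */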
2447 | |
2448 | #ifndef CONFIG_MMU |
2449 | unsigned long vb2_get_unmapped_area(struct vb2_queue *q, |
2450 | unsigned long addr, |
2451 | unsigned long len, |
2452 | unsigned long pgoff, |
2453 | unsigned long flags) |
2454 | { |
2455 | unsigned long offset = pgoff << PAGE_SHIFT; |
2456 | struct vb2_buffer *vb; |
2457 | unsigned int plane; |
2458 | void *vaddr; |
2459 | int ret; |
2460 | |
2461 | mutex_lock(&q->mmap_lock); |
2462 | |
2463 | /* |
2464 | * Find the plane corresponding to the offset passed by userspace. This |
	 * will return an error if the queue is not MEMORY_MMAP or if file
	 * I/O is in progress.
2466 | */ |
2467 | ret = __find_plane_by_offset(q, offset, &vb, &plane); |
2468 | if (ret) |
2469 | goto unlock; |
2470 | |
2471 | vaddr = vb2_plane_vaddr(vb, plane); |
2472 | mutex_unlock(&q->mmap_lock); |
2473 | return vaddr ? (unsigned long)vaddr : -EINVAL; |
2474 | |
2475 | unlock: |
2476 | mutex_unlock(&q->mmap_lock); |
2477 | return ret; |
2478 | } |
2479 | EXPORT_SYMBOL_GPL(vb2_get_unmapped_area); |
2480 | #endif |
2481 | |
2482 | int vb2_core_queue_init(struct vb2_queue *q) |
2483 | { |
2484 | /* |
2485 | * Sanity check |
2486 | */ |
2487 | /* |
2488 | * For drivers who don't support max_num_buffers ensure |
2489 | * a backward compatibility. |
2490 | */ |
2491 | if (!q->max_num_buffers) |
2492 | q->max_num_buffers = VB2_MAX_FRAME; |
2493 | |
2494 | /* The maximum is limited by offset cookie encoding pattern */ |
2495 | q->max_num_buffers = min_t(unsigned int, q->max_num_buffers, MAX_BUFFER_INDEX); |
2496 | |
2497 | if (WARN_ON(!q) || |
2498 | WARN_ON(!q->ops) || |
2499 | WARN_ON(!q->mem_ops) || |
2500 | WARN_ON(!q->type) || |
2501 | WARN_ON(!q->io_modes) || |
2502 | WARN_ON(!q->ops->queue_setup) || |
2503 | WARN_ON(!q->ops->buf_queue)) |
2504 | return -EINVAL; |
2505 | |
2506 | if (WARN_ON(q->max_num_buffers > MAX_BUFFER_INDEX) || |
2507 | WARN_ON(q->min_queued_buffers > q->max_num_buffers)) |
2508 | return -EINVAL; |
2509 | |
2510 | if (WARN_ON(q->requires_requests && !q->supports_requests)) |
2511 | return -EINVAL; |
2512 | |
2513 | /* |
2514 | * This combination is not allowed since a non-zero value of |
2515 | * q->min_queued_buffers can cause vb2_core_qbuf() to fail if |
2516 | * it has to call start_streaming(), and the Request API expects |
2517 | * that queueing a request (and thus queueing a buffer contained |
2518 | * in that request) will always succeed. There is no method of |
2519 | * propagating an error back to userspace. |
2520 | */ |
2521 | if (WARN_ON(q->supports_requests && q->min_queued_buffers)) |
2522 | return -EINVAL; |
2523 | |
	INIT_LIST_HEAD(&q->queued_list);
	INIT_LIST_HEAD(&q->done_list);
2526 | spin_lock_init(&q->done_lock); |
2527 | mutex_init(&q->mmap_lock); |
2528 | init_waitqueue_head(&q->done_wq); |
2529 | |
2530 | q->memory = VB2_MEMORY_UNKNOWN; |
2531 | |
2532 | if (q->buf_struct_size == 0) |
2533 | q->buf_struct_size = sizeof(struct vb2_buffer); |
2534 | |
2535 | if (q->bidirectional) |
2536 | q->dma_dir = DMA_BIDIRECTIONAL; |
2537 | else |
2538 | q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE; |
2539 | |
2540 | if (q->name[0] == '\0') |
		snprintf(q->name, sizeof(q->name), "%s-%p",
			 q->is_output ? "out" : "cap", q);
2543 | |
2544 | return 0; |
2545 | } |
2546 | EXPORT_SYMBOL_GPL(vb2_core_queue_init); |
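
/*
 * A minimal sketch of how a driver fills in a vb2_queue before calling the
 * vb2_queue_init() wrapper (which ends up here). The foo_* names and the
 * choice of vb2_vmalloc_memops are illustrative assumptions, not
 * requirements:
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
 *	q->drv_priv = dev;
 *	q->buf_struct_size = sizeof(struct foo_buffer);
 *	q->ops = &foo_qops;		// queue_setup, buf_queue, etc.
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->lock = &dev->lock;
 *	ret = vb2_queue_init(q);
 */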
2547 | |
2548 | static int __vb2_init_fileio(struct vb2_queue *q, int read); |
2549 | static int __vb2_cleanup_fileio(struct vb2_queue *q); |
2550 | void vb2_core_queue_release(struct vb2_queue *q) |
2551 | { |
2552 | __vb2_cleanup_fileio(q); |
2553 | __vb2_queue_cancel(q); |
2554 | mutex_lock(&q->mmap_lock); |
	__vb2_queue_free(q, vb2_get_num_buffers(q));
	kfree(q->bufs);
	q->bufs = NULL;
	mutex_unlock(&q->mmap_lock);
2559 | } |
2560 | EXPORT_SYMBOL_GPL(vb2_core_queue_release); |
2561 | |
2562 | __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, |
2563 | poll_table *wait) |
2564 | { |
	__poll_t req_events = poll_requested_events(wait);
2566 | struct vb2_buffer *vb = NULL; |
2567 | unsigned long flags; |
2568 | |
2569 | /* |
2570 | * poll_wait() MUST be called on the first invocation on all the |
2571 | * potential queues of interest, even if we are not interested in their |
	 * events during this first call. Failure to do so will result in
	 * the queue's events being ignored because the poll_table won't be
	 * capable of adding new wait queues thereafter.
2575 | */ |
	poll_wait(file, &q->done_wq, wait);
2577 | |
2578 | if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM))) |
2579 | return 0; |
2580 | if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM))) |
2581 | return 0; |
2582 | |
2583 | /* |
2584 | * Start file I/O emulator only if streaming API has not been used yet. |
2585 | */ |
	if (vb2_get_num_buffers(q) == 0 && !vb2_fileio_is_active(q)) {
		if (!q->is_output && (q->io_modes & VB2_READ) &&
		    (req_events & (EPOLLIN | EPOLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return EPOLLERR;
		}
		if (q->is_output && (q->io_modes & VB2_WRITE) &&
		    (req_events & (EPOLLOUT | EPOLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return EPOLLERR;
2596 | /* |
2597 | * Write to OUTPUT queue can be done immediately. |
2598 | */ |
2599 | return EPOLLOUT | EPOLLWRNORM; |
2600 | } |
2601 | } |
2602 | |
2603 | /* |
2604 | * There is nothing to wait for if the queue isn't streaming, or if the |
2605 | * error flag is set. |
2606 | */ |
2607 | if (!vb2_is_streaming(q) || q->error) |
2608 | return EPOLLERR; |
2609 | |
2610 | /* |
2611 | * If this quirk is set and QBUF hasn't been called yet then |
2612 | * return EPOLLERR as well. This only affects capture queues, output |
2613 | * queues will always initialize waiting_for_buffers to false. |
2614 | * This quirk is set by V4L2 for backwards compatibility reasons. |
2615 | */ |
2616 | if (q->quirk_poll_must_check_waiting_for_buffers && |
2617 | q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM))) |
2618 | return EPOLLERR; |
2619 | |
2620 | /* |
2621 | * For output streams you can call write() as long as there are fewer |
2622 | * buffers queued than there are buffers available. |
2623 | */ |
2624 | if (q->is_output && q->fileio && q->queued_count < vb2_get_num_buffers(q)) |
2625 | return EPOLLOUT | EPOLLWRNORM; |
2626 | |
	if (list_empty(&q->done_list)) {
2628 | /* |
2629 | * If the last buffer was dequeued from a capture queue, |
2630 | * return immediately. DQBUF will return -EPIPE. |
2631 | */ |
2632 | if (q->last_buffer_dequeued) |
2633 | return EPOLLIN | EPOLLRDNORM; |
2634 | } |
2635 | |
2636 | /* |
2637 | * Take first buffer available for dequeuing. |
2638 | */ |
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
				      done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);
2644 | |
2645 | if (vb && (vb->state == VB2_BUF_STATE_DONE |
2646 | || vb->state == VB2_BUF_STATE_ERROR)) { |
2647 | return (q->is_output) ? |
2648 | EPOLLOUT | EPOLLWRNORM : |
2649 | EPOLLIN | EPOLLRDNORM; |
2650 | } |
2651 | return 0; |
2652 | } |
2653 | EXPORT_SYMBOL_GPL(vb2_core_poll); |
2654 | |
2655 | /* |
2656 | * struct vb2_fileio_buf - buffer context used by file io emulator |
2657 | * |
2658 | * vb2 provides a compatibility layer and emulator of file io (read and |
2659 | * write) calls on top of streaming API. This structure is used for |
2660 | * tracking context related to the buffers. |
2661 | */ |
2662 | struct vb2_fileio_buf { |
2663 | void *vaddr; |
2664 | unsigned int size; |
2665 | unsigned int pos; |
2666 | unsigned int queued:1; |
2667 | }; |
2668 | |
2669 | /* |
2670 | * struct vb2_fileio_data - queue context used by file io emulator |
2671 | * |
2672 | * @cur_index: the index of the buffer currently being read from or |
2673 | * written to. If equal to number of buffers in the vb2_queue |
2674 | * then a new buffer must be dequeued. |
2675 | * @initial_index: in the read() case all buffers are queued up immediately |
2676 | * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles |
2677 | * buffers. However, in the write() case no buffers are initially |
2678 | * queued, instead whenever a buffer is full it is queued up by |
2679 | * __vb2_perform_fileio(). Only once all available buffers have |
2680 | * been queued up will __vb2_perform_fileio() start to dequeue |
2681 | * buffers. This means that initially __vb2_perform_fileio() |
2682 | * needs to know what buffer index to use when it is queuing up |
2683 | * the buffers for the first time. That initial index is stored |
2684 | * in this field. Once it is equal to number of buffers in the |
2685 | * vb2_queue all available buffers have been queued and |
2686 | * __vb2_perform_fileio() should start the normal dequeue/queue cycle. |
2687 | * |
2688 | * vb2 provides a compatibility layer and emulator of file io (read and |
 * write) calls on top of streaming API. For proper operation it requires
2690 | * this structure to save the driver state between each call of the read |
2691 | * or write function. |
2692 | */ |
2693 | struct vb2_fileio_data { |
2694 | unsigned int count; |
2695 | unsigned int type; |
2696 | unsigned int memory; |
2697 | struct vb2_fileio_buf bufs[VB2_MAX_FRAME]; |
2698 | unsigned int cur_index; |
2699 | unsigned int initial_index; |
2700 | unsigned int q_count; |
2701 | unsigned int dq_count; |
2702 | unsigned read_once:1; |
2703 | unsigned write_immediately:1; |
2704 | }; |
2705 | |
2706 | /* |
2707 | * __vb2_init_fileio() - initialize file io emulator |
2708 | * @q: videobuf2 queue |
2709 | * @read: mode selector (1 means read, 0 means write) |
2710 | */ |
2711 | static int __vb2_init_fileio(struct vb2_queue *q, int read) |
2712 | { |
2713 | struct vb2_fileio_data *fileio; |
2714 | struct vb2_buffer *vb; |
2715 | int i, ret; |
2716 | unsigned int count = 0; |
2717 | |
2718 | /* |
2719 | * Sanity check |
2720 | */ |
2721 | if (WARN_ON((read && !(q->io_modes & VB2_READ)) || |
2722 | (!read && !(q->io_modes & VB2_WRITE)))) |
2723 | return -EINVAL; |
2724 | |
2725 | /* |
2726 | * Check if device supports mapping buffers to kernel virtual space. |
2727 | */ |
2728 | if (!q->mem_ops->vaddr) |
2729 | return -EBUSY; |
2730 | |
2731 | /* |
2732 | * Check if streaming api has not been already activated. |
2733 | */ |
2734 | if (q->streaming || vb2_get_num_buffers(q) > 0) |
2735 | return -EBUSY; |
2736 | |
2737 | /* |
2738 | * Start with q->min_queued_buffers + 1, driver can increase it in |
2739 | * queue_setup() |
2740 | * |
2741 | * 'min_queued_buffers' buffers need to be queued up before you |
2742 | * can start streaming, plus 1 for userspace (or in this case, |
2743 | * kernelspace) processing. |
2744 | */ |
2745 | count = max(2, q->min_queued_buffers + 1); |
2746 | |
2747 | dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n" , |
2748 | (read) ? "read" : "write" , count, q->fileio_read_once, |
2749 | q->fileio_write_immediately); |
2750 | |
	fileio = kzalloc(sizeof(*fileio), GFP_KERNEL);
2752 | if (fileio == NULL) |
2753 | return -ENOMEM; |
2754 | |
2755 | fileio->read_once = q->fileio_read_once; |
2756 | fileio->write_immediately = q->fileio_write_immediately; |
2757 | |
2758 | /* |
2759 | * Request buffers and use MMAP type to force driver |
2760 | * to allocate buffers by itself. |
2761 | */ |
2762 | fileio->count = count; |
2763 | fileio->memory = VB2_MEMORY_MMAP; |
2764 | fileio->type = q->type; |
2765 | q->fileio = fileio; |
2766 | ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); |
2767 | if (ret) |
2768 | goto err_kfree; |
2769 | |
2770 | /* |
2771 | * Userspace can never add or delete buffers later, so there |
2772 | * will never be holes. It is safe to assume that vb2_get_buffer(q, 0) |
	 * will always return a valid vb pointer.
	 */
	vb = vb2_get_buffer(q, 0);
2776 | |
2777 | /* |
2778 | * Check if plane_count is correct |
2779 | * (multiplane buffers are not supported). |
2780 | */ |
2781 | if (vb->num_planes != 1) { |
2782 | ret = -EBUSY; |
2783 | goto err_reqbufs; |
2784 | } |
2785 | |
2786 | /* |
2787 | * Get kernel address of each buffer. |
2788 | */ |
	for (i = 0; i < vb2_get_num_buffers(q); i++) {
		/* vb can never be NULL when using fileio. */
		vb = vb2_get_buffer(q, i);

		fileio->bufs[i].vaddr = vb2_plane_vaddr(vb, 0);
		if (fileio->bufs[i].vaddr == NULL) {
			ret = -EINVAL;
			goto err_reqbufs;
		}
		fileio->bufs[i].size = vb2_plane_size(vb, 0);
2799 | } |
2800 | |
2801 | /* |
2802 | * Read mode requires pre queuing of all buffers. |
2803 | */ |
2804 | if (read) { |
2805 | /* |
2806 | * Queue all buffers. |
2807 | */ |
2808 | for (i = 0; i < vb2_get_num_buffers(q); i++) { |
			struct vb2_buffer *vb2 = vb2_get_buffer(q, i);
2810 | |
2811 | if (!vb2) |
2812 | continue; |
2813 | |
2814 | ret = vb2_core_qbuf(q, vb2, NULL, NULL); |
2815 | if (ret) |
2816 | goto err_reqbufs; |
2817 | fileio->bufs[i].queued = 1; |
2818 | } |
2819 | /* |
2820 | * All buffers have been queued, so mark that by setting |
2821 | * initial_index to the number of buffers in the vb2_queue |
2822 | */ |
2823 | fileio->initial_index = vb2_get_num_buffers(q); |
2824 | fileio->cur_index = fileio->initial_index; |
2825 | } |
2826 | |
2827 | /* |
2828 | * Start streaming. |
2829 | */ |
2830 | ret = vb2_core_streamon(q, q->type); |
2831 | if (ret) |
2832 | goto err_reqbufs; |
2833 | |
2834 | return ret; |
2835 | |
2836 | err_reqbufs: |
2837 | fileio->count = 0; |
2838 | vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); |
2839 | |
2840 | err_kfree: |
2841 | q->fileio = NULL; |
	kfree(fileio);
2843 | return ret; |
2844 | } |
2845 | |
2846 | /* |
 * __vb2_cleanup_fileio() - free resources used by the file io emulator
2848 | * @q: videobuf2 queue |
2849 | */ |
2850 | static int __vb2_cleanup_fileio(struct vb2_queue *q) |
2851 | { |
2852 | struct vb2_fileio_data *fileio = q->fileio; |
2853 | |
2854 | if (fileio) { |
2855 | vb2_core_streamoff(q, q->type); |
2856 | q->fileio = NULL; |
2857 | fileio->count = 0; |
2858 | vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); |
		kfree(fileio);
		dprintk(q, 3, "file io emulator closed\n");
2861 | } |
2862 | return 0; |
2863 | } |
2864 | |
2865 | /* |
2866 | * __vb2_perform_fileio() - perform a single file io (read or write) operation |
2867 | * @q: videobuf2 queue |
 * @data: pointer to the target userspace buffer
 * @count: number of bytes to read or write
 * @ppos: file handle position tracking pointer
 * @nonblock: mode selector (1 means nonblocking, 0 means blocking)
2872 | * @read: access mode selector (1 means read, 0 means write) |
2873 | */ |
2874 | static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count, |
2875 | loff_t *ppos, int nonblock, int read) |
2876 | { |
2877 | struct vb2_fileio_data *fileio; |
2878 | struct vb2_fileio_buf *buf; |
2879 | bool is_multiplanar = q->is_multiplanar; |
2880 | /* |
2881 | * When using write() to write data to an output video node the vb2 core |
2882 | * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody |
2883 | * else is able to provide this information with the write() operation. |
2884 | */ |
2885 | bool copy_timestamp = !read && q->copy_timestamp; |
2886 | unsigned index; |
2887 | int ret; |
2888 | |
2889 | dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n" , |
2890 | read ? "read" : "write" , (long)*ppos, count, |
2891 | nonblock ? "non" : "" ); |
2892 | |
2893 | if (!data) |
2894 | return -EINVAL; |
2895 | |
	if (q->waiting_in_dqbuf) {
		dprintk(q, 3, "another dup()ped fd is %s\n",
			read ? "reading" : "writing");
		return -EBUSY;
	}
2901 | |
2902 | /* |
2903 | * Initialize emulator on first call. |
2904 | */ |
2905 | if (!vb2_fileio_is_active(q)) { |
2906 | ret = __vb2_init_fileio(q, read); |
2907 | dprintk(q, 3, "vb2_init_fileio result: %d\n" , ret); |
2908 | if (ret) |
2909 | return ret; |
2910 | } |
2911 | fileio = q->fileio; |
2912 | |
2913 | /* |
2914 | * Check if we need to dequeue the buffer. |
2915 | */ |
2916 | index = fileio->cur_index; |
2917 | if (index >= vb2_get_num_buffers(q)) { |
2918 | struct vb2_buffer *b; |
2919 | |
2920 | /* |
2921 | * Call vb2_dqbuf to get buffer back. |
2922 | */ |
2923 | ret = vb2_core_dqbuf(q, &index, NULL, nonblock); |
2924 | dprintk(q, 5, "vb2_dqbuf result: %d\n" , ret); |
2925 | if (ret) |
2926 | return ret; |
2927 | fileio->dq_count += 1; |
2928 | |
2929 | fileio->cur_index = index; |
2930 | buf = &fileio->bufs[index]; |
2931 | |
2932 | /* b can never be NULL when using fileio. */ |
2933 | b = vb2_get_buffer(q, index); |
2934 | |
2935 | /* |
2936 | * Get number of bytes filled by the driver |
2937 | */ |
2938 | buf->pos = 0; |
2939 | buf->queued = 0; |
		buf->size = read ? vb2_get_plane_payload(b, 0)
				 : vb2_plane_size(b, 0);
2942 | /* Compensate for data_offset on read in the multiplanar case. */ |
2943 | if (is_multiplanar && read && |
2944 | b->planes[0].data_offset < buf->size) { |
2945 | buf->pos = b->planes[0].data_offset; |
2946 | buf->size -= buf->pos; |
2947 | } |
2948 | } else { |
2949 | buf = &fileio->bufs[index]; |
2950 | } |
2951 | |
2952 | /* |
2953 | * Limit count on last few bytes of the buffer. |
2954 | */ |
2955 | if (buf->pos + count > buf->size) { |
2956 | count = buf->size - buf->pos; |
2957 | dprintk(q, 5, "reducing read count: %zd\n" , count); |
2958 | } |
2959 | |
2960 | /* |
2961 | * Transfer data to userspace. |
2962 | */ |
2963 | dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n" , |
2964 | count, index, buf->pos); |
2965 | if (read) |
2966 | ret = copy_to_user(to: data, from: buf->vaddr + buf->pos, n: count); |
2967 | else |
2968 | ret = copy_from_user(to: buf->vaddr + buf->pos, from: data, n: count); |
2969 | if (ret) { |
2970 | dprintk(q, 3, "error copying data\n" ); |
2971 | return -EFAULT; |
2972 | } |
2973 | |
2974 | /* |
2975 | * Update counters. |
2976 | */ |
2977 | buf->pos += count; |
2978 | *ppos += count; |
2979 | |
2980 | /* |
2981 | * Queue next buffer if required. |
2982 | */ |
2983 | if (buf->pos == buf->size || (!read && fileio->write_immediately)) { |
2984 | /* b can never be NULL when using fileio. */ |
2985 | struct vb2_buffer *b = vb2_get_buffer(q, index); |
2986 | |
2987 | /* |
2988 | * Check if this is the last buffer to read. |
2989 | */ |
2990 | if (read && fileio->read_once && fileio->dq_count == 1) { |
2991 | dprintk(q, 3, "read limit reached\n" ); |
2992 | return __vb2_cleanup_fileio(q); |
2993 | } |
2994 | |
2995 | /* |
2996 | * Call vb2_qbuf and give buffer to the driver. |
2997 | */ |
2998 | b->planes[0].bytesused = buf->pos; |
2999 | |
3000 | if (copy_timestamp) |
3001 | b->timestamp = ktime_get_ns(); |
3002 | ret = vb2_core_qbuf(q, b, NULL, NULL); |
3003 | dprintk(q, 5, "vb2_qbuf result: %d\n" , ret); |
3004 | if (ret) |
3005 | return ret; |
3006 | |
3007 | /* |
3008 | * Buffer has been queued, update the status |
3009 | */ |
3010 | buf->pos = 0; |
3011 | buf->queued = 1; |
		buf->size = vb2_plane_size(b, 0);
3013 | fileio->q_count += 1; |
3014 | /* |
3015 | * If we are queuing up buffers for the first time, then |
3016 | * increase initial_index by one. |
3017 | */ |
3018 | if (fileio->initial_index < vb2_get_num_buffers(q)) |
3019 | fileio->initial_index++; |
3020 | /* |
3021 | * The next buffer to use is either a buffer that's going to be |
3022 | * queued for the first time (initial_index < number of buffers in the vb2_queue) |
3023 | * or it is equal to the number of buffers in the vb2_queue, |
3024 | * meaning that the next time we need to dequeue a buffer since |
3025 | * we've now queued up all the 'first time' buffers. |
3026 | */ |
3027 | fileio->cur_index = fileio->initial_index; |
3028 | } |
3029 | |
3030 | /* |
3031 | * Return proper number of bytes processed. |
3032 | */ |
3033 | if (ret == 0) |
3034 | ret = count; |
3035 | return ret; |
3036 | } |
3037 | |
3038 | size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, |
3039 | loff_t *ppos, int nonblocking) |
3040 | { |
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
3042 | } |
3043 | EXPORT_SYMBOL_GPL(vb2_read); |
3044 | |
3045 | size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, |
3046 | loff_t *ppos, int nonblocking) |
3047 | { |
	return __vb2_perform_fileio(q, (char __user *) data, count,
				    ppos, nonblocking, 0);
3050 | } |
3051 | EXPORT_SYMBOL_GPL(vb2_write); |
3052 | |
3053 | struct vb2_threadio_data { |
3054 | struct task_struct *thread; |
3055 | vb2_thread_fnc fnc; |
3056 | void *priv; |
3057 | bool stop; |
3058 | }; |
3059 | |
3060 | static int vb2_thread(void *data) |
3061 | { |
3062 | struct vb2_queue *q = data; |
3063 | struct vb2_threadio_data *threadio = q->threadio; |
3064 | bool copy_timestamp = false; |
3065 | unsigned prequeue = 0; |
3066 | unsigned index = 0; |
3067 | int ret = 0; |
3068 | |
3069 | if (q->is_output) { |
3070 | prequeue = vb2_get_num_buffers(q); |
3071 | copy_timestamp = q->copy_timestamp; |
3072 | } |
3073 | |
3074 | set_freezable(); |
3075 | |
3076 | for (;;) { |
3077 | struct vb2_buffer *vb; |
3078 | |
3079 | /* |
3080 | * Call vb2_dqbuf to get buffer back. |
3081 | */ |
3082 | if (prequeue) { |
			vb = vb2_get_buffer(q, index++);
3084 | if (!vb) |
3085 | continue; |
3086 | prequeue--; |
3087 | } else { |
3088 | call_void_qop(q, wait_finish, q); |
3089 | if (!threadio->stop) |
3090 | ret = vb2_core_dqbuf(q, &index, NULL, 0); |
3091 | call_void_qop(q, wait_prepare, q); |
3092 | dprintk(q, 5, "file io: vb2_dqbuf result: %d\n" , ret); |
3093 | if (!ret) |
3094 | vb = vb2_get_buffer(q, index); |
3095 | } |
3096 | if (ret || threadio->stop) |
3097 | break; |
3098 | try_to_freeze(); |
3099 | |
3100 | if (vb->state != VB2_BUF_STATE_ERROR) |
3101 | if (threadio->fnc(vb, threadio->priv)) |
3102 | break; |
3103 | call_void_qop(q, wait_finish, q); |
3104 | if (copy_timestamp) |
3105 | vb->timestamp = ktime_get_ns(); |
3106 | if (!threadio->stop) |
3107 | ret = vb2_core_qbuf(q, vb, NULL, NULL); |
3108 | call_void_qop(q, wait_prepare, q); |
3109 | if (ret || threadio->stop) |
3110 | break; |
3111 | } |
3112 | |
3113 | /* Hmm, linux becomes *very* unhappy without this ... */ |
3114 | while (!kthread_should_stop()) { |
3115 | set_current_state(TASK_INTERRUPTIBLE); |
3116 | schedule(); |
3117 | } |
3118 | return 0; |
3119 | } |
3120 | |
3121 | /* |
3122 | * This function should not be used for anything else but the videobuf2-dvb |
3123 | * support. If you think you have another good use-case for this, then please |
 * contact the linux-media mailing list first.
3125 | */ |
3126 | int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, |
3127 | const char *thread_name) |
3128 | { |
3129 | struct vb2_threadio_data *threadio; |
3130 | int ret = 0; |
3131 | |
3132 | if (q->threadio) |
3133 | return -EBUSY; |
3134 | if (vb2_is_busy(q)) |
3135 | return -EBUSY; |
3136 | if (WARN_ON(q->fileio)) |
3137 | return -EBUSY; |
3138 | |
	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
3140 | if (threadio == NULL) |
3141 | return -ENOMEM; |
3142 | threadio->fnc = fnc; |
3143 | threadio->priv = priv; |
3144 | |
	ret = __vb2_init_fileio(q, !q->is_output);
	dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret);
3147 | if (ret) |
3148 | goto nomem; |
3149 | q->threadio = threadio; |
3150 | threadio->thread = kthread_run(vb2_thread, q, "vb2-%s" , thread_name); |
3151 | if (IS_ERR(ptr: threadio->thread)) { |
3152 | ret = PTR_ERR(ptr: threadio->thread); |
3153 | threadio->thread = NULL; |
3154 | goto nothread; |
3155 | } |
3156 | return 0; |
3157 | |
3158 | nothread: |
3159 | __vb2_cleanup_fileio(q); |
3160 | nomem: |
	kfree(threadio);
3162 | return ret; |
3163 | } |
3164 | EXPORT_SYMBOL_GPL(vb2_thread_start); |
3165 | |
3166 | int vb2_thread_stop(struct vb2_queue *q) |
3167 | { |
3168 | struct vb2_threadio_data *threadio = q->threadio; |
3169 | int err; |
3170 | |
3171 | if (threadio == NULL) |
3172 | return 0; |
3173 | threadio->stop = true; |
3174 | /* Wake up all pending sleeps in the thread */ |
3175 | vb2_queue_error(q); |
	err = kthread_stop(threadio->thread);
3177 | __vb2_cleanup_fileio(q); |
3178 | threadio->thread = NULL; |
	kfree(threadio);
3180 | q->threadio = NULL; |
3181 | return err; |
3182 | } |
3183 | EXPORT_SYMBOL_GPL(vb2_thread_stop); |
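
/*
 * Usage sketch for the thread API above (foo_* names are hypothetical).
 * The callback is invoked for every dequeued buffer; returning non-zero
 * stops the thread:
 *
 *	static int foo_fill(struct vb2_buffer *vb, void *priv)
 *	{
 *		struct foo_dev *dev = priv;
 *
 *		foo_consume(dev, vb2_plane_vaddr(vb, 0),
 *			    vb2_get_plane_payload(vb, 0));
 *		return 0;
 *	}
 *
 *	vb2_thread_start(q, foo_fill, dev, "foo");
 *	...
 *	vb2_thread_stop(q);
 */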
3184 | |
3185 | MODULE_DESCRIPTION("Media buffer core framework" ); |
3186 | MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski" ); |
3187 | MODULE_LICENSE("GPL" ); |
3188 | MODULE_IMPORT_NS(DMA_BUF); |
3189 | |