// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "uvcvideo.h"
/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * Video queues are initialized by uvc_queue_init(), which performs basic
 * initialization of the uvc_video_queue struct and sets up the underlying
 * videobuf2 queue; it fails only if vb2_queue_init() fails.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to protect
 * the videobuf2 queue operations by serializing calls to videobuf2 and a
 * spinlock to protect the IRQ queue that holds the buffers to be processed by
 * the driver.
 */

static inline struct uvc_buffer *uvc_vbuf_to_buffer(struct vb2_v4l2_buffer *buf)
{
	return container_of(buf, struct uvc_buffer, buf);
}

/*
 * Return all queued buffers to videobuf2 in the requested state.
 *
 * This function must be called with the queue spinlock held.
 */
static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
				     enum uvc_buffer_state state)
{
	enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
					? VB2_BUF_STATE_ERROR
					: VB2_BUF_STATE_QUEUED;

	while (!list_empty(&queue->irqqueue)) {
		struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
							  struct uvc_buffer,
							  queue);
		list_del(&buf->queue);
		buf->state = state;
		vb2_buffer_done(&buf->buf.vb2_buf, vb2_state);
	}
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

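/*
 * Called by videobuf2 to negotiate the number of buffers and plane sizes.
 * Metadata queues use the fixed UVC_METADATA_BUF_SIZE, while video queues
 * size buffers from the dwMaxVideoFrameSize value of the negotiated streaming
 * control. Only single-planar formats are supported.
 */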
static int uvc_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream;
	unsigned int size;

	switch (vq->type) {
	case V4L2_BUF_TYPE_META_CAPTURE:
		size = UVC_METADATA_BUF_SIZE;
		break;

	default:
		stream = uvc_queue_to_stream(queue);
		size = stream->ctrl.dwMaxVideoFrameSize;
		break;
	}

	/*
	 * When called with plane sizes, validate them. The driver supports
	 * single planar formats only, and requires buffers to be large enough
	 * to store a complete frame.
	 */
	if (*nplanes)
		return *nplanes != 1 || sizes[0] < size ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = size;
	return 0;
}

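/*
 * Validate a buffer before it is queued: reject output buffers whose payload
 * exceeds the plane size, fail with -ENODEV once the device has been
 * disconnected, and cache the plane address, size and payload in the
 * uvc_buffer for use by the URB completion handlers.
 */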
static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);

	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
		uvc_dbg(uvc_queue_to_stream(queue)->dev, CAPTURE,
			"[E] Bytes used out of bounds\n");
		return -EINVAL;
	}

	if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
		return -ENODEV;

	buf->state = UVC_BUF_STATE_QUEUED;
	buf->error = 0;
	buf->mem = vb2_plane_vaddr(vb, 0);
	buf->length = vb2_plane_size(vb, 0);
	if (vb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		buf->bytesused = 0;
	else
		buf->bytesused = vb2_get_plane_payload(vb, 0);

	return 0;
}

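/*
 * Add a buffer to the IRQ queue for processing by the URB completion
 * handlers, taking the initial reference on the buffer. If the device has
 * been disconnected the buffer is immediately returned to userspace in the
 * ERROR state instead. Also called internally by uvc_queue_buffer_requeue().
 */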
static void uvc_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
		kref_init(&buf->ref);
		list_add_tail(&buf->queue, &queue->irqqueue);
	} else {
		/*
		 * If the device is disconnected return the buffer to userspace
		 * directly. The next QBUF call will fail with -ENODEV.
		 */
		buf->state = UVC_BUF_STATE_ERROR;
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&queue->irqlock, flags);
}

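/*
 * Called by videobuf2 before a buffer is handed back to userspace. Buffers
 * that completed successfully get their timestamp converted from the device
 * clock to the host clock by uvc_video_clock_update().
 */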
static void uvc_buffer_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);

	if (vb->state == VB2_BUF_STATE_DONE)
		uvc_video_clock_update(stream, vbuf, buf);
}

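/*
 * Start the video stream. If the hardware fails to start, all buffers queued
 * so far are handed back to videobuf2 in the QUEUED state, as the vb2 API
 * requires of a failing start_streaming operation.
 */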
static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	int ret;

	lockdep_assert_irqs_enabled();

	queue->buf_used = 0;

	ret = uvc_video_start_streaming(stream);
	if (ret == 0)
		return 0;

	spin_lock_irq(&queue->irqlock);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
	spin_unlock_irq(&queue->irqlock);

	return ret;
}

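/*
 * Stop the video stream and return all buffers left on the IRQ queue in the
 * ERROR state. Metadata queues share the stream started through the video
 * queue, so only video queues stop the hardware.
 */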
static void uvc_stop_streaming(struct vb2_queue *vq)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);

	lockdep_assert_irqs_enabled();

	if (vq->type != V4L2_BUF_TYPE_META_CAPTURE)
		uvc_video_stop_streaming(uvc_queue_to_stream(queue));

	spin_lock_irq(&queue->irqlock);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
	spin_unlock_irq(&queue->irqlock);
}

static const struct vb2_ops uvc_queue_qops = {
	.queue_setup = uvc_queue_setup,
	.buf_prepare = uvc_buffer_prepare,
	.buf_queue = uvc_buffer_queue,
	.buf_finish = uvc_buffer_finish,
	.start_streaming = uvc_start_streaming,
	.stop_streaming = uvc_stop_streaming,
};

static const struct vb2_ops uvc_meta_queue_qops = {
	.queue_setup = uvc_queue_setup,
	.buf_prepare = uvc_buffer_prepare,
	.buf_queue = uvc_buffer_queue,
	.stop_streaming = uvc_stop_streaming,
};

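/*
 * uvc_queue_init - Initialize a video or metadata buffers queue
 *
 * Set up the embedded vb2_queue, selecting the metadata queue operations for
 * V4L2_BUF_TYPE_META_CAPTURE queues and the video queue operations (which
 * additionally support DMABUF) otherwise, then initialize the serialization
 * mutex, the IRQ spinlock and the IRQ queue list head.
 *
 * Returns 0 on success or a negative error code from vb2_queue_init().
 */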
int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	int ret;

	queue->queue.type = type;
	queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
	queue->queue.drv_priv = queue;
	queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
	queue->queue.mem_ops = &vb2_vmalloc_memops;
	queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
		| V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
	queue->queue.lock = &queue->mutex;

	switch (type) {
	case V4L2_BUF_TYPE_META_CAPTURE:
		queue->queue.ops = &uvc_meta_queue_qops;
		break;
	default:
		queue->queue.io_modes |= VB2_DMABUF;
		queue->queue.ops = &uvc_queue_qops;
		break;
	}

	ret = vb2_queue_init(&queue->queue);
	if (ret)
		return ret;

	mutex_init(&queue->mutex);
	spin_lock_init(&queue->irqlock);
	INIT_LIST_HEAD(&queue->irqqueue);

	return 0;
}

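/*
 * Free all buffers and release the videobuf2 queue. The queue mutex
 * serializes the release against any in-flight videobuf2 operation.
 */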
void uvc_queue_release(struct uvc_video_queue *queue)
{
	mutex_lock(&queue->mutex);
	vb2_queue_release(&queue->queue);
	mutex_unlock(&queue->mutex);
}

/* -----------------------------------------------------------------------------
 * V4L2 queue operations
 */

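/*
 * The helpers below are thin wrappers around the corresponding vb2_*()
 * operations. Their purpose is to serialize all videobuf2 calls under the
 * queue mutex, as described in the locking documentation at the top of this
 * file, before returning the result to the V4L2 ioctl handlers.
 */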
int uvc_request_buffers(struct uvc_video_queue *queue,
			struct v4l2_requestbuffers *rb)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_reqbufs(&queue->queue, rb);
	mutex_unlock(&queue->mutex);

	return ret ? ret : rb->count;
}

int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_querybuf(&queue->queue, buf);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_create_buffers(struct uvc_video_queue *queue,
		       struct v4l2_create_buffers *cb)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_create_bufs(&queue->queue, cb);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_buffer(struct uvc_video_queue *queue,
		     struct media_device *mdev, struct v4l2_buffer *buf)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_qbuf(&queue->queue, mdev, buf);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_export_buffer(struct uvc_video_queue *queue,
		      struct v4l2_exportbuffer *exp)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_expbuf(&queue->queue, exp);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
		       int nonblocking)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_streamon(&queue->queue, type);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_streamoff(&queue->queue, type);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
	return vb2_mmap(&queue->queue, vma);
}

#ifndef CONFIG_MMU
unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
					  unsigned long pgoff)
{
	return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif

__poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
			poll_table *wait)
{
	__poll_t ret;

	mutex_lock(&queue->mutex);
	ret = vb2_poll(&queue->queue, file, wait);
	mutex_unlock(&queue->mutex);

	return ret;
}

/* -----------------------------------------------------------------------------
 * Queue state and buffer lifetime helpers
 */

/*
 * Check if buffers have been allocated.
 */
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
	int allocated;

	mutex_lock(&queue->mutex);
	allocated = vb2_is_busy(&queue->queue);
	mutex_unlock(&queue->mutex);

	return allocated;
}

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the IRQ queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer()
 * will fail with -ENODEV.
 *
 * This function acquires the IRQ spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
	/*
	 * This must be protected by the irqlock spinlock to avoid race
	 * conditions between uvc_buffer_queue and the disconnection event that
	 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
	 * blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED
	 * state outside the queue code.
	 */
	if (disconnect)
		queue->flags |= UVC_QUEUE_DISCONNECTED;
	spin_unlock_irqrestore(&queue->irqlock, flags);
}

/*
 * uvc_queue_get_current_buffer: Obtain the current working output buffer
 *
 * Buffers may span multiple packets, and even URBs, therefore the active
 * buffer remains on the queue until the EOF marker.
 */
static struct uvc_buffer *
__uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
{
	if (list_empty(&queue->irqqueue))
		return NULL;

	return list_first_entry(&queue->irqqueue, struct uvc_buffer, queue);
}

struct uvc_buffer *uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
{
	struct uvc_buffer *nextbuf;
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	nextbuf = __uvc_queue_get_current_buffer(queue);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	return nextbuf;
}
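
/*
 * Illustrative (not verbatim) consumption pattern, as used by the URB decode
 * path in uvc_video.c: the decoder peeks at the current buffer, appends
 * payload data to it, and moves on to the next buffer once a frame is
 * complete.
 *
 *	buf = uvc_queue_get_current_buffer(queue);
 *	while (buf != NULL) {
 *		... copy payload bytes to buf->mem + buf->bytesused ...
 *		if (frame_complete)
 *			buf = uvc_queue_next_buffer(queue, buf);
 *	}
 */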

/*
 * uvc_queue_buffer_requeue: Requeue a buffer on our internal irqqueue
 *
 * Reuse a buffer through our internal queue without the need to 'prepare'.
 * The buffer will be returned to userspace through the uvc_buffer_queue call
 * if the device has been disconnected.
 */
static void uvc_queue_buffer_requeue(struct uvc_video_queue *queue,
				     struct uvc_buffer *buf)
{
	buf->error = 0;
	buf->state = UVC_BUF_STATE_QUEUED;
	buf->bytesused = 0;
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);

	uvc_buffer_queue(&buf->buf.vb2_buf);
}

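/*
 * kref release callback, invoked when the last reference to a buffer is
 * dropped. Corrupted buffers are requeued internally unless the nodrop module
 * parameter is set; all other buffers are handed back to videobuf2 in the
 * DONE or ERROR state with their final payload size.
 */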
static void uvc_queue_buffer_complete(struct kref *ref)
{
	struct uvc_buffer *buf = container_of(ref, struct uvc_buffer, ref);
	struct vb2_buffer *vb = &buf->buf.vb2_buf;
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);

	if (buf->error && !uvc_no_drop_param) {
		uvc_queue_buffer_requeue(queue, buf);
		return;
	}

	buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
	vb2_buffer_done(&buf->buf.vb2_buf, buf->error ? VB2_BUF_STATE_ERROR :
							VB2_BUF_STATE_DONE);
}

/*
 * Release a reference on the buffer. Complete the buffer when the last
 * reference is released.
 */
void uvc_queue_buffer_release(struct uvc_buffer *buf)
{
	kref_put(&buf->ref, uvc_queue_buffer_complete);
}

/*
 * Remove this buffer from the queue. Lifetime will persist while async actions
 * are still running (if any), and uvc_queue_buffer_release() will give the
 * buffer back to videobuf2 when all users have completed.
 */
struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
					 struct uvc_buffer *buf)
{
	struct uvc_buffer *nextbuf;
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	list_del(&buf->queue);
	nextbuf = __uvc_queue_get_current_buffer(queue);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	uvc_queue_buffer_release(buf);

	return nextbuf;
}