/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Memory-to-memory device framework for Video for Linux 2.
 *
 * Helper functions for devices that use memory buffers for both source
 * and destination.
 *
 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */

#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H

#include <media/videobuf2-v4l2.h>

/**
 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 * @device_run: required. Begin the actual job (transaction) inside this
 *		callback.
 *		The job does NOT have to end before this callback returns
 *		(and it will be the usual case). When the job finishes,
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish()
 *		has to be called.
 * @job_ready:	optional. Should return 0 if the driver does not have a job
 *		fully prepared to run yet (i.e. it will not be able to finish
 *		a transaction without sleeping). If not provided, it will be
 *		assumed that one source and one destination buffer are all
 *		that is required for the driver to perform one full transaction.
 *		This method may not sleep.
 * @job_abort:	optional. Informs the driver that it has to abort the currently
 *		running transaction as soon as possible (i.e. as soon as it can
 *		stop the device safely; e.g. in the next interrupt handler),
 *		even if the transaction would not have been finished by then.
 *		After the driver performs the necessary steps, it has to call
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as
 *		if the transaction ended normally.
 *		This function does not have to (and will usually not) wait
 *		until the device enters a state when it can be stopped.
 */
struct v4l2_m2m_ops {
	void (*device_run)(void *priv);
	int (*job_ready)(void *priv);
	void (*job_abort)(void *priv);
};
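
/*
 * A minimal sketch of how a driver might fill in these callbacks. The names
 * struct my_ctx, my_run_hw() and my_stop_hw() are hypothetical placeholders,
 * not part of this API; real drivers derive @priv from the pointer passed to
 * v4l2_m2m_ctx_init().
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		// Kick the hardware; the job completes later, typically in
 *		// the interrupt handler, which then calls
 *		// v4l2_m2m_job_finish().
 *		my_run_hw(ctx);
 *	}
 *
 *	static void my_job_abort(void *priv)
 *	{
 *		// Ask the hardware to stop; do not wait for it here.
 *		my_stop_hw(priv);
 *	}
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_abort	= my_job_abort,
 *	};
 */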

struct video_device;
struct v4l2_m2m_dev;

/**
 * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
 *	processed
 *
 * @q: pointer to struct &vb2_queue
 * @rdy_queue: list of buffers ready to be processed
 * @rdy_spinlock: spin lock to protect the struct usage
 * @num_rdy: number of buffers ready to be processed
 * @buffered: is the queue buffered?
 *
 * Queue for buffers ready to be processed as soon as this
 * instance receives access to the device.
 */
struct v4l2_m2m_queue_ctx {
	struct vb2_queue q;

	struct list_head rdy_queue;
	spinlock_t rdy_spinlock;
	u8 num_rdy;
	bool buffered;
};

/**
 * struct v4l2_m2m_ctx - Memory to memory context structure
 *
 * @q_lock: struct &mutex lock
 * @new_frame: valid in the device_run callback: if true, then this
 *		starts a new frame; if false, then this is a new slice
 *		for an existing frame. This is always true unless
 *		V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which
 *		indicates slicing support.
 * @is_draining: indicates the device is in the draining phase
 * @last_src_buf: indicates the last source buffer for draining
 * @next_buf_last: next capture queued buffer will be tagged as last
 * @has_stopped: indicates the device has been stopped
 * @ignore_cap_streaming: If true, job_ready can be called even if the CAPTURE
 *		queue is not streaming. This allows firmware to
 *		analyze the bitstream header which arrives on the
 *		OUTPUT queue. The driver must implement the job_ready
 *		callback correctly to make sure that the requirements
 *		for actual decoding are met.
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @cap_q_ctx: Capture (output to memory) queue context
 * @out_q_ctx: Output (input from memory) queue context
 * @queue: List of memory to memory contexts
 * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
 *		%TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
 * @finished: Wait queue used to signal when a job finishes
 * @priv: Instance private data
 *
 * The memory to memory context is specific to a file handle, NOT to e.g.
 * a device.
 */
struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex *q_lock;

	bool new_frame;

	bool is_draining;
	struct vb2_v4l2_buffer *last_src_buf;
	bool next_buf_last;
	bool has_stopped;
	bool ignore_cap_streaming;

	/* internal use only */
	struct v4l2_m2m_dev *m2m_dev;

	struct v4l2_m2m_queue_ctx cap_q_ctx;

	struct v4l2_m2m_queue_ctx out_q_ctx;

	/* For device job queue */
	struct list_head queue;
	unsigned long job_flags;
	wait_queue_head_t finished;

	void *priv;
};

/**
 * struct v4l2_m2m_buffer - Memory to memory buffer
 *
 * @vb: pointer to struct &vb2_v4l2_buffer
 * @list: list of m2m buffers
 */
struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head list;
};

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type);

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready, as sketched below.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
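
/*
 * A minimal sketch of such a custom job_ready callback, assuming a
 * hypothetical device that consumes two source buffers per transaction.
 * struct my_ctx and its fh member are illustrative; drivers usually embed a
 * struct v4l2_fh and keep the m2m context pointer in fh.m2m_ctx.
 *
 *	static int my_job_ready(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		// Ready only when a full transaction can run without sleeping.
 *		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 2 &&
 *		       v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) >= 1;
 *	}
 */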

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after &v4l2_m2m_ops->device_run
 * callback has been called on the driver. To prevent recursion, it should
 * not be called directly from the &v4l2_m2m_ops->device_run callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx);
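
/*
 * A sketch of the usual completion path, assuming a hypothetical interrupt
 * handler my_irq() and driver structs my_dev/my_ctx; only the framework
 * calls are real API. The buffers are taken off the ready lists, returned
 * to userspace, and then the job is finished so another context can run.
 *
 *	static irqreturn_t my_irq(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *
 *		// Never call this from device_run itself.
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */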

/**
 * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with
 * state and inform the framework that a job has been finished and have it
 * clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
 *
 * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this
 * function instead of job_finish() to take held buffers into account. It is
 * optional for other drivers.
 *
 * This function removes the source buffer from the ready list and returns
 * it with the given state. The same is done for the destination buffer, unless
 * it is marked 'held'. In that case the buffer is kept on the ready list.
 *
 * After that the job is finished (see job_finish()).
 *
 * This allows for multiple output buffers to be used to fill in a single
 * capture buffer. This is typically used by stateless decoders where
 * multiple e.g. H.264 slices contribute to a single decoded frame.
 */
void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state);

/**
 * v4l2_m2m_buf_done() - return a processed buffer to userspace with the
 * given state
 *
 * @buf: pointer to struct &vb2_v4l2_buffer
 * @state: vb2 buffer state, as defined by enum &vb2_buffer_state
 */
static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}

/**
 * v4l2_m2m_clear_state() - clear encoding/decoding state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void
v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx)
{
	m2m_ctx->next_buf_last = false;
	m2m_ctx->is_draining = false;
	m2m_ctx->has_stopped = false;
}

/**
 * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void
v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	m2m_ctx->next_buf_last = false;
	m2m_ctx->is_draining = false;
	m2m_ctx->has_stopped = true;
}

/**
 * v4l2_m2m_dst_buf_is_last() - return the current encoding/decoding session
 * draining management state of the next queued capture buffer
 *
 * This last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to notify
 * the end of the capture session.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline bool
v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->is_draining && m2m_ctx->next_buf_last;
}

/**
 * v4l2_m2m_has_stopped() - return the current encoding/decoding session
 * stopped state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline bool
v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->has_stopped;
}

/**
 * v4l2_m2m_is_last_draining_src_buf() - return the output buffer draining
 * state in the current encoding/decoding session
 *
 * This will identify the last output buffer queued before a session stop
 * was required. Once that buffer has been processed, the encoding/decoding
 * session reaches an actual stopped state.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
static inline bool
v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx,
				  struct vb2_v4l2_buffer *vbuf)
{
	return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf;
}

/**
 * v4l2_m2m_last_buffer_done() - marks the buffer with LAST flag and DONE
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf);
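
/*
 * A sketch of how the draining helpers above typically combine in a job
 * completion path; the my_irq() handler and struct my_ctx are hypothetical.
 * When the last pending source buffer has been processed, the capture buffer
 * is returned with V4L2_BUF_FLAG_LAST set via v4l2_m2m_last_buffer_done().
 *
 *	// Inside the hypothetical my_irq() completion handler:
 *	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *
 *	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *	if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src))
 *		// Tags dst with V4L2_BUF_FLAG_LAST, marks the session
 *		// stopped and completes the buffer.
 *		v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, dst);
 *	else
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 */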

/**
 * v4l2_m2m_suspend() - stop new jobs from being run and wait for current job
 * to finish
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in the suspend hook. Stops new jobs from being run, and
 * waits for the currently running job to finish.
 */
void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_resume() - resume job running and try to run a queued job
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in the resume hook. This reverts the operation of
 * v4l2_m2m_suspend() and allows jobs to be run again. Also tries to run a
 * queued job if there is any.
 */
void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev);
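
/*
 * A sketch of how these pair up in driver power-management hooks; struct
 * my_dev and the my_* function names are placeholders, and real drivers
 * will also quiesce their hardware between the two calls.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct my_dev *mdev = dev_get_drvdata(dev);
 *
 *		// Block new jobs and wait for the running one to finish.
 *		v4l2_m2m_suspend(mdev->m2m_dev);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct my_dev *mdev = dev_get_drvdata(dev);
 *
 *		// Allow jobs again and kick a queued one, if any.
 *		v4l2_m2m_resume(mdev->m2m_dev);
 *		return 0;
 *	}
 */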

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @reqbufs: pointer to struct &v4l2_requestbuffers
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf);

/**
 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending
 * on the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @create: pointer to struct &v4l2_create_buffers
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @eb: pointer to struct &v4l2_exportbuffer
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type);

/**
 * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding
 * session state when a start of streaming of a video queue is requested
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @q: queue
 */
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					   struct vb2_queue *q);

/**
 * v4l2_m2m_update_stop_streaming_state() - update the encoding/decoding
 * session state when a stop of streaming of a video queue is requested
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @q: queue
 */
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					  struct vb2_queue *q);

/**
 * v4l2_m2m_encoder_cmd() - execute an encoder command
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @ec: pointer to the encoder command
 */
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_encoder_cmd *ec);

/**
 * v4l2_m2m_decoder_cmd() - execute a decoder command
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @dc: pointer to the decoder command
 */
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_decoder_cmd *dc);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @wait: pointer to struct &poll_table_struct
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while the same for
 * the destination queue indicates a non-blocking read.
 */
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vma: pointer to struct &vm_area_struct
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for the video buffer, which will receive normal per-queue offsets
 * and proper vb2 queue pointers. The differentiation is made outside
 * vb2 by adding a predefined offset to buffers from one of the queues
 * and subtracting it before passing it back to vb2. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma);

#ifndef CONFIG_MMU
unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags);
#endif
/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * @m2m_ops: pointer to struct v4l2_m2m_ops
 *
 * Usually called from driver's ``probe()`` function.
 *
 * Return: returns an opaque pointer to the internal data to handle M2M context
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
				       struct video_device *vdev,
				       int function);
#else
static inline void
v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
}

static inline int
v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
				   struct video_device *vdev, int function)
{
	return 0;
}
#endif

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Usually called from driver's ``remove()`` function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function
 *	to be used for initializing vb2_queues
 *
 * Usually called from driver's ``open()`` function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq,
				  struct vb2_queue *dst_vq));
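
/*
 * A sketch of the usual ``open()`` pattern, with hypothetical my_* names;
 * only the v4l2_m2m_*() and vb2 calls are real API. The queue_init callback
 * receives both vb2 queues so the driver can set types, ops and locks.
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		// ... set io_modes, ops, mem_ops and locks, then:
 *		return vb2_queue_init(src_vq) ? : vb2_queue_init(dst_vq);
 *	}
 *
 *	// In the hypothetical my_open():
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
 *					    my_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 */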

/**
 * v4l2_m2m_set_src_buffered() - mark the source (OUTPUT) queue as buffered
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buffered: whether the queue is buffered
 *
 * A buffered queue can be scheduled without buffers queued on it; see
 * v4l2_m2m_try_schedule().
 */
static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->out_q_ctx.buffered = buffered;
}

/**
 * v4l2_m2m_set_dst_buffered() - mark the destination (CAPTURE) queue as
 * buffered
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buffered: whether the queue is buffered
 */
static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->cap_q_ctx.buffered = buffered;
}

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 *
 * Call from the struct &vb2_ops->buf_queue callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf);
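
/*
 * A minimal sketch of a vb2 buf_queue callback built on this helper; the
 * my_* names are placeholders. Most m2m drivers need nothing more here,
 * since the framework schedules the job once both queues have buffers.
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		// Put the buffer on the matching ready list (source or
 *		// destination, depending on the queue type).
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */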

/**
 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready
 * for use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned int num_buf_rdy;
	unsigned long flags;

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
	num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

	return num_buf_rdy;
}

/**
 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
 * ready for use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned int num_buf_rdy;
	unsigned long flags;

	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
	num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);

	return num_buf_rdy;
}

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_last_src_buf() - return last source buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_src_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list)
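
/*
 * A sketch of a stop_streaming-style cleanup using the safe iterators; the
 * my_stop_streaming() function and struct my_ctx are hypothetical, and a
 * real driver would pick the source or destination variant based on the
 * queue type. The _safe variants are needed because each buffer is removed
 * from the ready list while iterating.
 *
 *	static void my_stop_streaming(struct vb2_queue *q)
 *	{
 *		struct my_ctx *ctx = vb2_get_drv_priv(q);
 *		struct v4l2_m2m_buffer *b, *n;
 *
 *		v4l2_m2m_for_each_src_buf_safe(ctx->fh.m2m_ctx, b, n) {
 *			v4l2_m2m_src_buf_remove_by_buf(ctx->fh.m2m_ctx,
 *						       &b->vb);
 *			v4l2_m2m_buf_done(&b->vb, VB2_BUF_STATE_ERROR);
 *		}
 *	}
 */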

/**
 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->out_q_ctx.q;
}

/**
 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->cap_q_ctx.q;
}

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 * buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 * ready buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_buf_remove_by_buf() - take off exact buffer from the list of ready
 * buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @vbuf: the buffer to be removed
 */
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf);

/**
 * v4l2_m2m_src_buf_remove_by_buf() - take off exact source buffer from the
 * list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf);
}

/**
 * v4l2_m2m_dst_buf_remove_by_buf() - take off exact destination buffer from
 * the list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf);
}

/**
 * v4l2_m2m_buf_remove_by_idx() - take off the buffer with the given index from
 * the list of ready buffers and return it, or NULL if it is not found
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @idx: vb2 buffer index
 */
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx);

static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx);
}

static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}

/**
 * v4l2_m2m_buf_copy_metadata() - copy buffer metadata from
 * the output buffer to the capture buffer
 *
 * @out_vb: the output buffer that is the source of the metadata.
 * @cap_vb: the capture buffer that will receive the metadata.
 * @copy_frame_flags: copy the KEY/B/PFRAME flags as well.
 *
 * This helper function copies the timestamp, timecode (if the TIMECODE
 * buffer flag was set), field and the TIMECODE, KEYFRAME, BFRAME, PFRAME
 * and TSTAMP_SRC_MASK flags from @out_vb to @cap_vb.
 *
 * If @copy_frame_flags is false, then the KEYFRAME, BFRAME and PFRAME
 * flags are not copied. This is typically needed for encoders that
 * set these bits explicitly.
 */
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags);
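
/*
 * A sketch of a device_run callback tying the helpers above together; the
 * my_* names are placeholders. The buffers are only peeked at here (not
 * removed); the completion path removes them and calls
 * v4l2_m2m_job_finish().
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *
 *		// Propagate timestamp and frame flags to the result.
 *		v4l2_m2m_buf_copy_metadata(src, dst, true);
 *
 *		my_program_hw(ctx, src, dst);	// start the transaction
 *	}
 */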

/* v4l2 request helper */

void v4l2_m2m_request_queue(struct media_request *req);

/* v4l2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
			       struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
			    struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
			  struct v4l2_exportbuffer *eb);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
			struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
			 struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
			       struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
			    enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
			     enum v4l2_buf_type type);
int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *fh,
			       struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *fh,
			       struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
				   struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
				   struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
					     struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
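
/*
 * A sketch of how the ioctl and file-operation helpers above are typically
 * wired up; my_ioctl_ops/my_fops are placeholders and real drivers add
 * format handling and more. The helpers expect the driver to use struct
 * v4l2_fh and to store the m2m context in fh->m2m_ctx.
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *		.unlocked_ioctl	= video_ioctl2,
 *	};
 */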

#endif /* _MEDIA_V4L2_MEM2MEM_H */