1 | // Copyright (C) 2016 Jolla Ltd. |
---|---|
2 | // SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only |
3 | |
4 | #include "qgstvideorenderersink_p.h" |
5 | |
6 | #include <QtMultimedia/qvideoframe.h> |
7 | #include <QtMultimedia/qvideosink.h> |
8 | #include <QtMultimedia/private/qvideoframe_p.h> |
9 | #include <QtGui/rhi/qrhi.h> |
10 | #include <QtCore/qcoreapplication.h> |
11 | #include <QtCore/qdebug.h> |
12 | #include <QtCore/qloggingcategory.h> |
13 | #include <QtCore/private/qfactoryloader_p.h> |
14 | #include <QtCore/private/quniquehandle_p.h> |
15 | |
16 | #include <common/qgst_debug_p.h> |
17 | #include <common/qgstreamermetadata_p.h> |
18 | #include <common/qgstreamervideosink_p.h> |
19 | #include <common/qgstutils_p.h> |
20 | #include <common/qgstvideobuffer_p.h> |
21 | |
22 | #include <gst/video/video.h> |
23 | #include <gst/video/gstvideometa.h> |
24 | |
25 | |
26 | #if QT_CONFIG(gstreamer_gl) |
27 | #include <gst/gl/gl.h> |
28 | #endif // #if QT_CONFIG(gstreamer_gl) |
29 | |
30 | // DMA support |
31 | #if QT_CONFIG(gstreamer_gl_egl) && QT_CONFIG(linux_dmabuf) |
32 | # include <gst/allocators/gstdmabuf.h> |
33 | #endif |
34 | |
// NOLINTBEGIN(readability-convert-member-functions-to-static)

// Logging category for this file; enable at runtime with
// QT_LOGGING_RULES="qt.multimedia.gstvideorenderer.debug=true".
static Q_LOGGING_CATEGORY(qLcGstVideoRenderer, "qt.multimedia.gstvideorenderer")
38 | |
39 | QT_BEGIN_NAMESPACE |
40 | |
41 | QGstVideoRenderer::QGstVideoRenderer(QGstreamerVideoSink *sink) |
42 | : m_sink(sink), m_surfaceCaps(createSurfaceCaps(sink)) |
43 | { |
44 | QObject::connect( |
45 | sender: sink, signal: &QGstreamerVideoSink::aboutToBeDestroyed, context: this, |
46 | slot: [this] { |
47 | QMutexLocker locker(&m_sinkMutex); |
48 | m_sink = nullptr; |
49 | }, |
50 | type: Qt::DirectConnection); |
51 | } |
52 | |
// Out-of-line defaulted destructor: keeps member destructors emitted in this TU.
QGstVideoRenderer::~QGstVideoRenderer() = default;
54 | |
55 | QGstCaps QGstVideoRenderer::createSurfaceCaps([[maybe_unused]] QGstreamerVideoSink *sink) |
56 | { |
57 | QGstCaps caps = QGstCaps::create(); |
58 | |
59 | // All the formats that both we and gstreamer support |
60 | auto formats = QList<QVideoFrameFormat::PixelFormat>() |
61 | << QVideoFrameFormat::Format_YUV420P |
62 | << QVideoFrameFormat::Format_YUV422P |
63 | << QVideoFrameFormat::Format_YV12 |
64 | << QVideoFrameFormat::Format_UYVY |
65 | << QVideoFrameFormat::Format_YUYV |
66 | << QVideoFrameFormat::Format_NV12 |
67 | << QVideoFrameFormat::Format_NV21 |
68 | << QVideoFrameFormat::Format_AYUV |
69 | << QVideoFrameFormat::Format_P010 |
70 | << QVideoFrameFormat::Format_XRGB8888 |
71 | << QVideoFrameFormat::Format_XBGR8888 |
72 | << QVideoFrameFormat::Format_RGBX8888 |
73 | << QVideoFrameFormat::Format_BGRX8888 |
74 | << QVideoFrameFormat::Format_ARGB8888 |
75 | << QVideoFrameFormat::Format_ABGR8888 |
76 | << QVideoFrameFormat::Format_RGBA8888 |
77 | << QVideoFrameFormat::Format_BGRA8888 |
78 | << QVideoFrameFormat::Format_Y8 |
79 | << QVideoFrameFormat::Format_Y16 |
80 | ; |
81 | #if QT_CONFIG(gstreamer_gl) |
82 | QRhi *rhi = sink->rhi(); |
83 | if (rhi && rhi->backend() == QRhi::OpenGLES2) { |
84 | caps.addPixelFormats(formats, GST_CAPS_FEATURE_MEMORY_GL_MEMORY); |
85 | # if QT_CONFIG(gstreamer_gl_egl) && QT_CONFIG(linux_dmabuf) |
86 | if (sink->eglDisplay() && sink->eglImageTargetTexture2D()) { |
87 | // We currently do not handle planar DMA buffers, as it's somewhat unclear how to |
88 | // convert the planar EGLImage into something we can use from OpenGL |
89 | auto singlePlaneFormats = QList<QVideoFrameFormat::PixelFormat>() |
90 | << QVideoFrameFormat::Format_UYVY |
91 | << QVideoFrameFormat::Format_YUYV |
92 | << QVideoFrameFormat::Format_AYUV |
93 | << QVideoFrameFormat::Format_XRGB8888 |
94 | << QVideoFrameFormat::Format_XBGR8888 |
95 | << QVideoFrameFormat::Format_RGBX8888 |
96 | << QVideoFrameFormat::Format_BGRX8888 |
97 | << QVideoFrameFormat::Format_ARGB8888 |
98 | << QVideoFrameFormat::Format_ABGR8888 |
99 | << QVideoFrameFormat::Format_RGBA8888 |
100 | << QVideoFrameFormat::Format_BGRA8888 |
101 | << QVideoFrameFormat::Format_Y8 |
102 | << QVideoFrameFormat::Format_Y16 |
103 | ; |
104 | caps.addPixelFormats(formats: singlePlaneFormats, GST_CAPS_FEATURE_MEMORY_DMABUF); |
105 | } |
106 | # endif |
107 | } |
108 | #endif |
109 | caps.addPixelFormats(formats); |
110 | return caps; |
111 | } |
112 | |
// Handles the custom events posted from the GStreamer streaming thread
// (render() / stop()) on the renderer's own thread. The warning pragmas are
// needed because renderFramesEvent/stopEvent are not members of QEvent::Type.
void QGstVideoRenderer::customEvent(QEvent *event)
{
    QT_WARNING_PUSH
    QT_WARNING_DISABLE_GCC("-Wswitch") // case value not in enumerated type ‘QEvent::Type’

    switch (event->type()) {
    case renderFramesEvent: {
        // LATER: we currently show every frame. however it may be reasonable to drop frames
        // here if the queue contains more than one frame
        while (std::optional<RenderBufferState> nextState = m_bufferQueue.dequeue())
            handleNewBuffer(std::move(*nextState));
        return;
    }
    case stopEvent: {
        // Drop the GstBuffer reference and the pipeline frame, then republish
        // the (now detached) current video frame to the sink.
        m_currentState.buffer = {};
        m_currentPipelineFrame = {};
        updateCurrentVideoFrame(m_currentVideoFrame);
        return;
    }

    default:
        return;
    }
    QT_WARNING_POP
}
138 | |
139 | |
140 | void QGstVideoRenderer::handleNewBuffer(RenderBufferState state) |
141 | { |
142 | auto videoBuffer = std::make_unique<QGstVideoBuffer>(args&: state.buffer, args&: m_videoInfo, args&: m_sink, |
143 | args&: state.format, args&: state.memoryFormat); |
144 | QVideoFrame frame = QVideoFramePrivate::createFrame(buffer: std::move(videoBuffer), format: state.format); |
145 | QGstUtils::setFrameTimeStampsFromBuffer(frame: &frame, buffer: state.buffer.get()); |
146 | |
147 | m_currentPipelineFrame = std::move(frame); |
148 | m_currentState = std::move(state); |
149 | |
150 | if (!m_isActive) { |
151 | qCDebug(qLcGstVideoRenderer) << " showing empty video frame"; |
152 | updateCurrentVideoFrame({}); |
153 | return; |
154 | } |
155 | |
156 | updateCurrentVideoFrame(m_currentPipelineFrame); |
157 | } |
158 | |
// Returns the caps computed once at construction time (see createSurfaceCaps()).
const QGstCaps &QGstVideoRenderer::caps()
{
    return m_surfaceCaps;
}
163 | |
164 | bool QGstVideoRenderer::start(const QGstCaps& caps) |
165 | { |
166 | qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::start"<< caps; |
167 | |
168 | auto optionalFormatAndVideoInfo = caps.formatAndVideoInfo(); |
169 | if (optionalFormatAndVideoInfo) { |
170 | std::tie(args&: m_format, args&: m_videoInfo) = std::move(*optionalFormatAndVideoInfo); |
171 | } else { |
172 | m_format = {}; |
173 | m_videoInfo = {}; |
174 | } |
175 | m_memoryFormat = caps.memoryFormat(); |
176 | |
177 | // NOTE: m_format will not be fully populated until GST_EVENT_TAG is processed |
178 | |
179 | return true; |
180 | } |
181 | |
// Stops rendering: discards queued buffers immediately (streaming thread) and
// posts a stopEvent so the frame release happens on the renderer's own thread.
void QGstVideoRenderer::stop()
{
    qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::stop";

    m_bufferQueue.clear();
    QCoreApplication::postEvent(this, new QEvent(stopEvent));
}
189 | |
// GstBaseSink unlock hook; nothing blocks here, so this only logs.
void QGstVideoRenderer::unlock()
{
    qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::unlock";
}
194 | |
// Allocation-query hook; no pool or metadata is proposed, the query is simply
// accepted unchanged.
bool QGstVideoRenderer::proposeAllocation(GstQuery *)
{
    qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::proposeAllocation";
    return true;
}
200 | |
201 | GstFlowReturn QGstVideoRenderer::render(GstBuffer *buffer) |
202 | { |
203 | qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::render"; |
204 | |
205 | if (m_flushing) { |
206 | qCDebug(qLcGstVideoRenderer) |
207 | << " buffer received while flushing the sink ... discarding buffer"; |
208 | return GST_FLOW_FLUSHING; |
209 | } |
210 | |
211 | GstVideoCropMeta *meta = gst_buffer_get_video_crop_meta(buffer); |
212 | if (meta) { |
213 | QRect vp(meta->x, meta->y, meta->width, meta->height); |
214 | if (m_format.viewport() != vp) { |
215 | qCDebug(qLcGstVideoRenderer) |
216 | << Q_FUNC_INFO << " Update viewport on Metadata: ["<< meta->height << "x" |
217 | << meta->width << " | "<< meta->x << "x"<< meta->y << "]"; |
218 | // Update viewport if data is not the same |
219 | m_format.setViewport(vp); |
220 | } |
221 | } |
222 | |
223 | RenderBufferState state{ |
224 | .buffer = QGstBufferHandle{ buffer, QGstBufferHandle::NeedsRef }, |
225 | .format = m_format, |
226 | .memoryFormat = m_memoryFormat, |
227 | }; |
228 | |
229 | qCDebug(qLcGstVideoRenderer) << " sending video frame"; |
230 | |
231 | qsizetype sizeOfQueue = m_bufferQueue.enqueue(value: std::move(state)); |
232 | if (sizeOfQueue == 1) |
233 | // we only need to wake up, if we don't have a pending frame |
234 | QCoreApplication::postEvent(receiver: this, event: new QEvent(renderFramesEvent)); |
235 | |
236 | return GST_FLOW_OK; |
237 | } |
238 | |
// Answers GST_QUERY_CONTEXT queries for the local GL context so upstream GL
// elements can share it. Returns false for everything else (caller chains up).
bool QGstVideoRenderer::query(GstQuery *query)
{
#if QT_CONFIG(gstreamer_gl)
    if (GST_QUERY_TYPE(query) == GST_QUERY_CONTEXT) {
        const gchar *type;
        gst_query_parse_context_type(query, &type);

        if (strcmp(type, "gst.gl.local_context") != 0)
            return false;

        // The sink may be torn down concurrently (see the aboutToBeDestroyed
        // connection in the constructor), so access it under the mutex.
        QMutexLocker locker(&m_sinkMutex);
        if (!m_sink)
            return false;

        auto *gstGlContext = m_sink->gstGlLocalContext();
        if (!gstGlContext)
            return false;

        gst_query_set_context(query, gstGlContext);

        return true;
    }
#else
    Q_UNUSED(query);
#endif
    return false;
}
266 | |
267 | void QGstVideoRenderer::gstEvent(GstEvent *event) |
268 | { |
269 | qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::gstEvent:"<< event; |
270 | |
271 | switch (GST_EVENT_TYPE(event)) { |
272 | case GST_EVENT_TAG: |
273 | return gstEventHandleTag(event); |
274 | case GST_EVENT_EOS: |
275 | return gstEventHandleEOS(event); |
276 | case GST_EVENT_FLUSH_START: |
277 | return gstEventHandleFlushStart(event); |
278 | case GST_EVENT_FLUSH_STOP: |
279 | return gstEventHandleFlushStop(event); |
280 | |
281 | default: |
282 | return; |
283 | } |
284 | } |
285 | |
286 | void QGstVideoRenderer::setActive(bool isActive) |
287 | { |
288 | if (isActive == m_isActive) |
289 | return; |
290 | |
291 | m_isActive = isActive; |
292 | if (isActive) |
293 | updateCurrentVideoFrame(m_currentPipelineFrame); |
294 | else |
295 | updateCurrentVideoFrame({}); |
296 | } |
297 | |
// Stores `frame` as the currently presented frame and forwards it to the
// QGstreamerVideoSink if it is still alive.
// NOTE(review): m_sink is read here without taking m_sinkMutex — presumably all
// callers run on the thread that also sees aboutToBeDestroyed; confirm.
void QGstVideoRenderer::updateCurrentVideoFrame(QVideoFrame frame)
{
    m_currentVideoFrame = std::move(frame);
    if (m_sink)
        m_sink->setVideoFrame(m_currentVideoFrame);
}
304 | |
305 | void QGstVideoRenderer::gstEventHandleTag(GstEvent *event) |
306 | { |
307 | GstTagList *taglist = nullptr; |
308 | gst_event_parse_tag(event, taglist: &taglist); |
309 | if (!taglist) |
310 | return; |
311 | |
312 | qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::gstEventHandleTag:"<< taglist; |
313 | |
314 | QGString value; |
315 | if (!gst_tag_list_get_string(list: taglist, GST_TAG_IMAGE_ORIENTATION, value: &value)) |
316 | return; |
317 | |
318 | RotationResult parsed = parseRotationTag(value.get()); |
319 | |
320 | m_format.setMirrored(parsed.flip); |
321 | m_format.setRotation(parsed.rotation); |
322 | } |
323 | |
// End-of-stream behaves like a stop: queued buffers are dropped and the
// current frame is released on the renderer thread.
void QGstVideoRenderer::gstEventHandleEOS(GstEvent *)
{
    stop();
}
328 | |
// Flush started: subsequent render() calls return GST_FLOW_FLUSHING until
// gstEventHandleFlushStop() clears the flag.
void QGstVideoRenderer::gstEventHandleFlushStart(GstEvent *)
{
    // "data is to be discarded"
    m_flushing = true;
    m_bufferQueue.clear();
}
335 | |
// Flush finished: buffers are accepted again.
void QGstVideoRenderer::gstEventHandleFlushStop(GstEvent *)
{
    // "data is allowed again"
    m_flushing = false;
}
341 | |
// Parent class pointer captured in class_init(); used by the thunks below to
// chain up (finalize, change_state, query, event).
static GstVideoSinkClass *gvrs_sink_parent_class;
// Out-of-band constructor argument: g_object_new() cannot pass the
// QGstreamerVideoSink to instance_init(), so createSink() stashes it here.
static thread_local QGstreamerVideoSink *gvrs_current_sink;

// Rebinds the raw GObject pointer `s` as a local `sink` of our wrapper type.
#define VO_SINK(s) QGstVideoRendererSink *sink(reinterpret_cast<QGstVideoRendererSink *>(s))
346 | |
347 | QGstVideoRendererSinkElement QGstVideoRendererSink::createSink(QGstreamerVideoSink *sink) |
348 | { |
349 | setSink(sink); |
350 | QGstVideoRendererSink *gstSink = reinterpret_cast<QGstVideoRendererSink *>( |
351 | g_object_new(object_type: QGstVideoRendererSink::get_type(), first_property_name: nullptr)); |
352 | |
353 | return QGstVideoRendererSinkElement{ |
354 | gstSink, |
355 | QGstElement::NeedsRef, |
356 | }; |
357 | } |
358 | |
// Stashes the sink in the thread-local slot consumed by instance_init().
void QGstVideoRendererSink::setSink(QGstreamerVideoSink *sink)
{
    gvrs_current_sink = sink;
}
363 | |
// Registers (once, thread-safely via static init) the GObject type for this
// sink, derived from GstVideoSink, and returns its GType.
GType QGstVideoRendererSink::get_type()
{
    static const GTypeInfo info =
    {
        sizeof(QGstVideoRendererSinkClass),               // class_size
        base_init,                                        // base_init
        nullptr,                                          // base_finalize
        class_init,                                       // class_init
        nullptr,                                          // class_finalize
        nullptr,                                          // class_data
        sizeof(QGstVideoRendererSink),                    // instance_size
        0,                                                // n_preallocs
        instance_init,                                    // instance_init
        nullptr                                           // value_table
    };

    static const GType type = g_type_register_static(GST_TYPE_VIDEO_SINK, "QGstVideoRendererSink",
                                                     &info, GTypeFlags(0));

    return type;
}
385 | |
// GObject class-init: wires the GstVideoSink/GstBaseSink/GstElement/GObject
// virtual tables to the static thunks below and registers element metadata.
void QGstVideoRendererSink::class_init(gpointer g_class, gpointer class_data)
{
    Q_UNUSED(class_data);

    // Remember the parent class so the thunks can chain up.
    gvrs_sink_parent_class = reinterpret_cast<GstVideoSinkClass *>(g_type_class_peek_parent(g_class));

    GstVideoSinkClass *video_sink_class = reinterpret_cast<GstVideoSinkClass *>(g_class);
    video_sink_class->show_frame = QGstVideoRendererSink::show_frame;

    GstBaseSinkClass *base_sink_class = reinterpret_cast<GstBaseSinkClass *>(g_class);
    base_sink_class->get_caps = QGstVideoRendererSink::get_caps;
    base_sink_class->set_caps = QGstVideoRendererSink::set_caps;
    base_sink_class->propose_allocation = QGstVideoRendererSink::propose_allocation;
    base_sink_class->stop = QGstVideoRendererSink::stop;
    base_sink_class->unlock = QGstVideoRendererSink::unlock;
    base_sink_class->query = QGstVideoRendererSink::query;
    base_sink_class->event = QGstVideoRendererSink::event;

    GstElementClass *element_class = reinterpret_cast<GstElementClass *>(g_class);
    element_class->change_state = QGstVideoRendererSink::change_state;
    gst_element_class_set_metadata(element_class,
                                   "Qt built-in video renderer sink",
                                   "Sink/Video",
                                   "Qt default built-in video renderer sink",
                                   "The Qt Company");

    GObjectClass *object_class = reinterpret_cast<GObjectClass *>(g_class);
    object_class->finalize = QGstVideoRendererSink::finalize;
}
415 | |
// GObject base-init: installs the static always-present "sink" pad template.
// It advertises generic video/x-raw; the concrete formats come from get_caps().
void QGstVideoRendererSink::base_init(gpointer g_class)
{
    static GstStaticPadTemplate sink_pad_template = GST_STATIC_PAD_TEMPLATE(
            "sink", GST_PAD_SINK, GST_PAD_ALWAYS, GST_STATIC_CAPS(
                "video/x-raw, "
                "framerate = (fraction) [ 0, MAX ], "
                "width = (int) [ 1, MAX ], "
                "height = (int) [ 1, MAX ]"));

    gst_element_class_add_pad_template(
            GST_ELEMENT_CLASS(g_class), gst_static_pad_template_get(&sink_pad_template));
}
428 | |
// GObject instance-init: creates the per-instance C++ renderer. The owning
// QGstreamerVideoSink arrives via the thread-local set in createSink().
void QGstVideoRendererSink::instance_init(GTypeInstance *instance, gpointer g_class)
{
    Q_UNUSED(g_class);
    VO_SINK(instance);

    Q_ASSERT(gvrs_current_sink);

    sink->renderer = new QGstVideoRenderer(gvrs_current_sink);
    // Events posted to the renderer must be delivered on the sink's thread.
    sink->renderer->moveToThread(gvrs_current_sink->thread());
    gvrs_current_sink = nullptr; // consumed; avoid accidental reuse
}
440 | |
// GObject finalize: destroys the C++ renderer, then chains up so the GObject
// machinery can release the instance itself.
void QGstVideoRendererSink::finalize(GObject *object)
{
    VO_SINK(object);

    delete sink->renderer;

    // Chain up
    G_OBJECT_CLASS(gvrs_sink_parent_class)->finalize(object);
}
450 | |
// GstElement::change_state thunk: delegates to the parent class and logs the
// transition and its result.
GstStateChangeReturn QGstVideoRendererSink::change_state(
        GstElement *element, GstStateChange transition)
{
    GstStateChangeReturn ret =
            GST_ELEMENT_CLASS(gvrs_sink_parent_class)->change_state(element, transition);
    qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::change_state:" << transition << ret;
    return ret;
}
459 | |
// GstBaseSink::get_caps thunk: returns the renderer's supported caps,
// intersected with the optional filter. Ownership of the returned GstCaps
// transfers to the caller (hence release()).
GstCaps *QGstVideoRendererSink::get_caps(GstBaseSink *base, GstCaps *filter)
{
    VO_SINK(base);

    QGstCaps caps = sink->renderer->caps();
    if (filter)
        caps = QGstCaps(gst_caps_intersect(caps.caps(), filter), QGstCaps::HasRef);

    return caps.release();
}
470 | |
// GstBaseSink::set_caps thunk: null caps stop the renderer; otherwise the
// renderer is (re)started with the negotiated caps.
gboolean QGstVideoRendererSink::set_caps(GstBaseSink *base, GstCaps *gcaps)
{
    VO_SINK(base);
    auto caps = QGstCaps(gcaps, QGstCaps::NeedsRef);

    qCDebug(qLcGstVideoRenderer) << "set_caps:" << caps;

    if (caps.isNull()) {
        sink->renderer->stop();
        return TRUE;
    }

    return sink->renderer->start(caps);
}
485 | |
// GstBaseSink::propose_allocation thunk: forwards to the renderer.
gboolean QGstVideoRendererSink::propose_allocation(GstBaseSink *base, GstQuery *query)
{
    VO_SINK(base);
    return sink->renderer->proposeAllocation(query);
}
491 | |
// GstBaseSink::stop thunk: forwards to the renderer; always reports success.
gboolean QGstVideoRendererSink::stop(GstBaseSink *base)
{
    VO_SINK(base);
    sink->renderer->stop();
    return TRUE;
}
498 | |
// GstBaseSink::unlock thunk: forwards to the renderer; always reports success.
gboolean QGstVideoRendererSink::unlock(GstBaseSink *base)
{
    VO_SINK(base);
    sink->renderer->unlock();
    return TRUE;
}
505 | |
// GstVideoSink::show_frame thunk: hands each buffer to the renderer.
GstFlowReturn QGstVideoRendererSink::show_frame(GstVideoSink *base, GstBuffer *buffer)
{
    VO_SINK(base);
    return sink->renderer->render(buffer);
}
511 | |
// GstBaseSink::query thunk: gives the renderer first refusal (GL context
// queries); anything it declines is chained up to the parent class.
gboolean QGstVideoRendererSink::query(GstBaseSink *base, GstQuery *query)
{
    VO_SINK(base);
    if (sink->renderer->query(query))
        return TRUE;

    return GST_BASE_SINK_CLASS(gvrs_sink_parent_class)->query(base, query);
}
520 | |
// GstBaseSink::event thunk: lets the renderer observe the event (tags, EOS,
// flushes), then always chains up so default handling still runs.
gboolean QGstVideoRendererSink::event(GstBaseSink *base, GstEvent * event)
{
    VO_SINK(base);
    sink->renderer->gstEvent(event);
    return GST_BASE_SINK_CLASS(gvrs_sink_parent_class)->event(base, event);
}
527 | |
// Wraps the raw sink object in the RAII element wrapper; `mode` decides
// whether the wrapper adopts the existing reference or adds its own.
QGstVideoRendererSinkElement::QGstVideoRendererSinkElement(QGstVideoRendererSink *element,
                                                           RefMode mode)
    : QGstBaseSink{
          qGstCheckedCast<GstBaseSink>(element),
          mode,
      }
{
}
536 | |
// Forwards activation state to the renderer (inactive renderers publish an
// empty frame, see QGstVideoRenderer::setActive()).
void QGstVideoRendererSinkElement::setActive(bool isActive)
{
    qGstVideoRendererSink()->renderer->setActive(isActive);
}
541 | |
// Downcasts the wrapped GstElement back to our sink type.
QGstVideoRendererSink *QGstVideoRendererSinkElement::qGstVideoRendererSink() const
{
    return reinterpret_cast<QGstVideoRendererSink *>(element());
}
546 | |
547 | QT_END_NAMESPACE |
548 |
Definitions
- qLcGstVideoRenderer
- QGstVideoRenderer
- ~QGstVideoRenderer
- createSurfaceCaps
- customEvent
- handleNewBuffer
- caps
- start
- stop
- unlock
- proposeAllocation
- render
- query
- gstEvent
- setActive
- updateCurrentVideoFrame
- gstEventHandleTag
- gstEventHandleEOS
- gstEventHandleFlushStart
- gstEventHandleFlushStop
- gvrs_sink_parent_class
- gvrs_current_sink
- createSink
- setSink
- get_type
- class_init
- base_init
- instance_init
- finalize
- change_state
- get_caps
- set_caps
- propose_allocation
- stop
- unlock
- show_frame
- query
- event
- QGstVideoRendererSinkElement
- setActive
Learn to use CMake with our Intro Training
Find out more