// Copyright (C) 2016 Jolla Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#include "qgstvideorenderersink_p.h"

#include <QtMultimedia/qvideoframe.h>
#include <QtMultimedia/qvideosink.h>
#include <QtMultimedia/private/qvideoframe_p.h>
#include <QtGui/rhi/qrhi.h>
#include <QtCore/qcoreapplication.h>
#include <QtCore/qdebug.h>
#include <QtCore/qloggingcategory.h>
#include <QtCore/private/qfactoryloader_p.h>
#include <QtCore/private/quniquehandle_p.h>

#include <common/qgst_debug_p.h>
#include <common/qgstreamermetadata_p.h>
#include <common/qgstreamervideosink_p.h>
#include <common/qgstutils_p.h>
#include <common/qgstvideobuffer_p.h>

#include <gst/video/video.h>
#include <gst/video/gstvideometa.h>


#if QT_CONFIG(gstreamer_gl)
#include <gst/gl/gl.h>
#endif // #if QT_CONFIG(gstreamer_gl)

// DMA support
#if QT_CONFIG(gstreamer_gl_egl) && QT_CONFIG(linux_dmabuf)
# include <gst/allocators/gstdmabuf.h>
#endif
// NOLINTBEGIN(readability-convert-member-functions-to-static)

static Q_LOGGING_CATEGORY(qLcGstVideoRenderer, "qt.multimedia.gstvideorenderer")

QT_BEGIN_NAMESPACE

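// The QGstreamerVideoSink may be destroyed on the application thread while the
// GStreamer streaming thread is still inside query() or render(). The
// constructor therefore installs a direct-connection handler that clears
// m_sink under m_sinkMutex as soon as the sink announces its destruction.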
QGstVideoRenderer::QGstVideoRenderer(QGstreamerVideoSink *sink)
    : m_sink(sink), m_surfaceCaps(createSurfaceCaps(sink))
{
    QObject::connect(
            sink, &QGstreamerVideoSink::aboutToBeDestroyed, this,
            [this] {
                QMutexLocker locker(&m_sinkMutex);
                m_sink = nullptr;
            },
            Qt::DirectConnection);
}

QGstVideoRenderer::~QGstVideoRenderer() = default;

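// Builds the caps advertised to upstream elements during negotiation.
// GStreamer generally prefers earlier caps structures, so the GL-memory (and,
// where available, DMABuf) variants are added ahead of the plain
// system-memory fallback.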
QGstCaps QGstVideoRenderer::createSurfaceCaps([[maybe_unused]] QGstreamerVideoSink *sink)
{
    QGstCaps caps = QGstCaps::create();

    // All the formats that both we and gstreamer support
    auto formats = QList<QVideoFrameFormat::PixelFormat>()
            << QVideoFrameFormat::Format_YUV420P
            << QVideoFrameFormat::Format_YUV422P
            << QVideoFrameFormat::Format_YV12
            << QVideoFrameFormat::Format_UYVY
            << QVideoFrameFormat::Format_YUYV
            << QVideoFrameFormat::Format_NV12
            << QVideoFrameFormat::Format_NV21
            << QVideoFrameFormat::Format_AYUV
            << QVideoFrameFormat::Format_P010
            << QVideoFrameFormat::Format_XRGB8888
            << QVideoFrameFormat::Format_XBGR8888
            << QVideoFrameFormat::Format_RGBX8888
            << QVideoFrameFormat::Format_BGRX8888
            << QVideoFrameFormat::Format_ARGB8888
            << QVideoFrameFormat::Format_ABGR8888
            << QVideoFrameFormat::Format_RGBA8888
            << QVideoFrameFormat::Format_BGRA8888
            << QVideoFrameFormat::Format_Y8
            << QVideoFrameFormat::Format_Y16
            ;
#if QT_CONFIG(gstreamer_gl)
    QRhi *rhi = sink->rhi();
    if (rhi && rhi->backend() == QRhi::OpenGLES2) {
        caps.addPixelFormats(formats, GST_CAPS_FEATURE_MEMORY_GL_MEMORY);
# if QT_CONFIG(gstreamer_gl_egl) && QT_CONFIG(linux_dmabuf)
        if (sink->eglDisplay() && sink->eglImageTargetTexture2D()) {
            // We currently do not handle planar DMA buffers, as it's somewhat unclear how to
            // convert the planar EGLImage into something we can use from OpenGL
            auto singlePlaneFormats = QList<QVideoFrameFormat::PixelFormat>()
                    << QVideoFrameFormat::Format_UYVY
                    << QVideoFrameFormat::Format_YUYV
                    << QVideoFrameFormat::Format_AYUV
                    << QVideoFrameFormat::Format_XRGB8888
                    << QVideoFrameFormat::Format_XBGR8888
                    << QVideoFrameFormat::Format_RGBX8888
                    << QVideoFrameFormat::Format_BGRX8888
                    << QVideoFrameFormat::Format_ARGB8888
                    << QVideoFrameFormat::Format_ABGR8888
                    << QVideoFrameFormat::Format_RGBA8888
                    << QVideoFrameFormat::Format_BGRA8888
                    << QVideoFrameFormat::Format_Y8
                    << QVideoFrameFormat::Format_Y16
                    ;
            caps.addPixelFormats(singlePlaneFormats, GST_CAPS_FEATURE_MEMORY_DMABUF);
        }
# endif
    }
#endif
    caps.addPixelFormats(formats);
    return caps;
}

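// Frames arrive on the GStreamer streaming thread via render(), but QVideoSink
// must be updated from the thread this renderer lives on. render() therefore
// enqueues the buffer and posts renderFramesEvent; the event loop delivers it
// here, where the queue is drained.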
void QGstVideoRenderer::customEvent(QEvent *event)
{
    QT_WARNING_PUSH
    QT_WARNING_DISABLE_GCC("-Wswitch") // case value not in enumerated type ‘QEvent::Type’

    switch (event->type()) {
    case renderFramesEvent: {
        // LATER: we currently show every frame. however it may be reasonable to drop frames
        // here if the queue contains more than one frame
        while (std::optional<RenderBufferState> nextState = m_bufferQueue.dequeue())
            handleNewBuffer(std::move(*nextState));
        return;
    }
    case stopEvent: {
        m_currentPipelineFrame = {};
        updateCurrentVideoFrame(m_currentVideoFrame);
        return;
    }

    default:
        return;
    }
    QT_WARNING_POP
}


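// Wraps the GstBuffer in a QGstVideoBuffer without copying the pixel data; the
// resulting QVideoFrame keeps a reference on the buffer for as long as the
// frame is in use.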
void QGstVideoRenderer::handleNewBuffer(RenderBufferState state)
{
    auto videoBuffer = std::make_unique<QGstVideoBuffer>(state.buffer, m_videoInfo, m_sink,
                                                         state.format, state.memoryFormat);
    QVideoFrame frame = QVideoFramePrivate::createFrame(std::move(videoBuffer), state.format);
    QGstUtils::setFrameTimeStampsFromBuffer(&frame, state.buffer.get());
    m_currentPipelineFrame = std::move(frame);

    if (!m_isActive) {
        qCDebug(qLcGstVideoRenderer) << " showing empty video frame";
        updateCurrentVideoFrame({});
        return;
    }

    updateCurrentVideoFrame(m_currentPipelineFrame);
}

const QGstCaps &QGstVideoRenderer::caps()
{
    return m_surfaceCaps;
}

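// Called from set_caps() on the streaming thread once negotiation has fixed
// the caps; extracts the QVideoFrameFormat and GstVideoInfo used for all
// subsequent buffers.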
bool QGstVideoRenderer::start(const QGstCaps &caps)
{
    qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::start" << caps;

    auto optionalFormatAndVideoInfo = caps.formatAndVideoInfo();
    if (optionalFormatAndVideoInfo) {
        std::tie(m_format, m_videoInfo) = std::move(*optionalFormatAndVideoInfo);
    } else {
        m_format = {};
        m_videoInfo = {};
    }
    m_memoryFormat = caps.memoryFormat();

    // NOTE: m_format will not be fully populated until GST_EVENT_TAG is processed

    return true;
}

void QGstVideoRenderer::stop()
{
    qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::stop";

    m_bufferQueue.clear();
    QCoreApplication::postEvent(this, new QEvent(stopEvent));
}

void QGstVideoRenderer::unlock()
{
    qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::unlock";
}

bool QGstVideoRenderer::proposeAllocation(GstQuery *)
{
    qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::proposeAllocation";
    return true;
}

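// Runs on the GStreamer streaming thread for every decoded buffer. A wake-up
// event is only posted when the queue transitions from empty to non-empty,
// because a single renderFramesEvent drains the entire queue.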
GstFlowReturn QGstVideoRenderer::render(GstBuffer *buffer)
{
    qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::render";

    if (m_flushing) {
        qCDebug(qLcGstVideoRenderer)
                << " buffer received while flushing the sink ... discarding buffer";
        return GST_FLOW_FLUSHING;
    }

    GstVideoCropMeta *meta = gst_buffer_get_video_crop_meta(buffer);
    if (meta) {
        QRect vp(meta->x, meta->y, meta->width, meta->height);
        if (m_format.viewport() != vp) {
            qCDebug(qLcGstVideoRenderer)
                    << Q_FUNC_INFO << " Update viewport on Metadata: [" << meta->height << "x"
                    << meta->width << " | " << meta->x << "x" << meta->y << "]";
            // Update viewport if data is not the same
            m_format.setViewport(vp);
        }
    }

    RenderBufferState state{
        QGstBufferHandle{ buffer, QGstBufferHandle::NeedsRef },
        m_format,
        m_memoryFormat,
    };

    qCDebug(qLcGstVideoRenderer) << " sending video frame";

    qsizetype sizeOfQueue = m_bufferQueue.enqueue(std::move(state));
    if (sizeOfQueue == 1)
        // we only need to wake up, if we don't have a pending frame
        QCoreApplication::postEvent(this, new QEvent(renderFramesEvent));

    return GST_FLOW_OK;
}

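// Answers the "gst.gl.local_context" context query so that upstream GL
// elements can share the sink's OpenGL context; m_sinkMutex protects against
// the sink being torn down concurrently on another thread.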
bool QGstVideoRenderer::query(GstQuery *query)
{
#if QT_CONFIG(gstreamer_gl)
    if (GST_QUERY_TYPE(query) == GST_QUERY_CONTEXT) {
        const gchar *type;
        gst_query_parse_context_type(query, &type);

        if (strcmp(type, "gst.gl.local_context") != 0)
            return false;

        QMutexLocker locker(&m_sinkMutex);
        if (!m_sink)
            return false;

        auto *gstGlContext = m_sink->gstGlLocalContext();
        if (!gstGlContext)
            return false;

        gst_query_set_context(query, gstGlContext);

        return true;
    }
#else
    Q_UNUSED(query);
#endif
    return false;
}

void QGstVideoRenderer::gstEvent(GstEvent *event)
{
    qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::gstEvent:" << event;

    switch (GST_EVENT_TYPE(event)) {
    case GST_EVENT_TAG:
        return gstEventHandleTag(event);
    case GST_EVENT_EOS:
        return gstEventHandleEOS(event);
    case GST_EVENT_FLUSH_START:
        return gstEventHandleFlushStart(event);
    case GST_EVENT_FLUSH_STOP:
        return gstEventHandleFlushStop(event);

    default:
        return;
    }
}

void QGstVideoRenderer::setActive(bool isActive)
{
    if (isActive == m_isActive)
        return;

    m_isActive = isActive;
    if (isActive)
        updateCurrentVideoFrame(m_currentPipelineFrame);
    else
        updateCurrentVideoFrame({});
}

void QGstVideoRenderer::updateCurrentVideoFrame(QVideoFrame frame)
{
    m_currentVideoFrame = std::move(frame);
    if (m_sink)
        m_sink->setVideoFrame(m_currentVideoFrame);
}

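// GST_EVENT_TAG may carry a GST_TAG_IMAGE_ORIENTATION string such as
// "rotate-90"; it is parsed into a rotation plus mirror flag and applied to
// the negotiated format, filling in the fields start() left incomplete.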
void QGstVideoRenderer::gstEventHandleTag(GstEvent *event)
{
    GstTagList *taglist = nullptr;
    gst_event_parse_tag(event, &taglist);
    if (!taglist)
        return;

    qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::gstEventHandleTag:" << taglist;

    QGString value;
    if (!gst_tag_list_get_string(taglist, GST_TAG_IMAGE_ORIENTATION, &value))
        return;

    RotationResult parsed = parseRotationTag(value.get());

    m_format.setMirrored(parsed.flip);
    m_format.setRotation(parsed.rotation);
}

void QGstVideoRenderer::gstEventHandleEOS(GstEvent *)
{
    stop();
}

void QGstVideoRenderer::gstEventHandleFlushStart(GstEvent *)
{
    // "data is to be discarded"
    m_flushing = true;
    m_bufferQueue.clear();
}

void QGstVideoRenderer::gstEventHandleFlushStop(GstEvent *)
{
    // "data is allowed again"
    m_flushing = false;
}

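// g_object_new() offers no way to pass constructor arguments through to
// instance_init(), so createSink() parks the QGstreamerVideoSink pointer in a
// thread_local that instance_init() consumes and clears again.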
static GstVideoSinkClass *gvrs_sink_parent_class;
static thread_local QGstreamerVideoSink *gvrs_current_sink;

#define VO_SINK(s) QGstVideoRendererSink *sink(reinterpret_cast<QGstVideoRendererSink *>(s))

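// Typical construction from the sink side (a hypothetical sketch; the actual
// call site lives elsewhere in the plugin):
//
//     QGstVideoRendererSinkElement element = QGstVideoRendererSink::createSink(sink);
//     element.setActive(true);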
QGstVideoRendererSinkElement QGstVideoRendererSink::createSink(QGstreamerVideoSink *sink)
{
    setSink(sink);
    QGstVideoRendererSink *gstSink = reinterpret_cast<QGstVideoRendererSink *>(
            g_object_new(QGstVideoRendererSink::get_type(), nullptr));

    return QGstVideoRendererSinkElement{
        gstSink,
        QGstElement::NeedsRef,
    };
}

void QGstVideoRendererSink::setSink(QGstreamerVideoSink *sink)
{
    gvrs_current_sink = sink;
}

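// Registers the GObject type exactly once; the function-local statics make
// both the registration and the cached GType thread-safe under C++
// magic-statics rules.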
GType QGstVideoRendererSink::get_type()
{
    static const GTypeInfo info =
    {
        sizeof(QGstVideoRendererSinkClass), // class_size
        base_init,                          // base_init
        nullptr,                            // base_finalize
        class_init,                         // class_init
        nullptr,                            // class_finalize
        nullptr,                            // class_data
        sizeof(QGstVideoRendererSink),      // instance_size
        0,                                  // n_preallocs
        instance_init,                      // instance_init
        nullptr                             // value_table
    };

    static const GType type = g_type_register_static(GST_TYPE_VIDEO_SINK, "QGstVideoRendererSink",
                                                     &info, GTypeFlags(0));

    return type;
}

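// Wires the GObject/GstBaseSink/GstVideoSink virtual tables to the static
// trampolines below, each of which forwards to the per-instance
// QGstVideoRenderer.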
void QGstVideoRendererSink::class_init(gpointer g_class, gpointer class_data)
{
    Q_UNUSED(class_data);

    gvrs_sink_parent_class = reinterpret_cast<GstVideoSinkClass *>(g_type_class_peek_parent(g_class));

    GstVideoSinkClass *video_sink_class = reinterpret_cast<GstVideoSinkClass *>(g_class);
    video_sink_class->show_frame = QGstVideoRendererSink::show_frame;

    GstBaseSinkClass *base_sink_class = reinterpret_cast<GstBaseSinkClass *>(g_class);
    base_sink_class->get_caps = QGstVideoRendererSink::get_caps;
    base_sink_class->set_caps = QGstVideoRendererSink::set_caps;
    base_sink_class->propose_allocation = QGstVideoRendererSink::propose_allocation;
    base_sink_class->stop = QGstVideoRendererSink::stop;
    base_sink_class->unlock = QGstVideoRendererSink::unlock;
    base_sink_class->query = QGstVideoRendererSink::query;
    base_sink_class->event = QGstVideoRendererSink::event;

    GstElementClass *element_class = reinterpret_cast<GstElementClass *>(g_class);
    element_class->change_state = QGstVideoRendererSink::change_state;
    gst_element_class_set_metadata(element_class,
                                   "Qt built-in video renderer sink",
                                   "Sink/Video",
                                   "Qt default built-in video renderer sink",
                                   "The Qt Company");

    GObjectClass *object_class = reinterpret_cast<GObjectClass *>(g_class);
    object_class->finalize = QGstVideoRendererSink::finalize;
}

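// The static pad template only constrains the outer media type; the concrete
// pixel formats are reported dynamically by get_caps() during negotiation.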
void QGstVideoRendererSink::base_init(gpointer g_class)
{
    static GstStaticPadTemplate sink_pad_template = GST_STATIC_PAD_TEMPLATE(
            "sink", GST_PAD_SINK, GST_PAD_ALWAYS, GST_STATIC_CAPS(
                    "video/x-raw, "
                    "framerate = (fraction) [ 0, MAX ], "
                    "width = (int) [ 1, MAX ], "
                    "height = (int) [ 1, MAX ]"));

    gst_element_class_add_pad_template(
            GST_ELEMENT_CLASS(g_class), gst_static_pad_template_get(&sink_pad_template));
}

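// instance_init() runs inside g_object_new(); it adopts the sink parked in
// gvrs_current_sink and moves the renderer to the sink's thread so the queued
// events are handled there.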
void QGstVideoRendererSink::instance_init(GTypeInstance *instance, gpointer g_class)
{
    Q_UNUSED(g_class);
    VO_SINK(instance);

    Q_ASSERT(gvrs_current_sink);

    sink->renderer = new QGstVideoRenderer(gvrs_current_sink);
    sink->renderer->moveToThread(gvrs_current_sink->thread());
    gvrs_current_sink = nullptr;
}

void QGstVideoRendererSink::finalize(GObject *object)
{
    VO_SINK(object);

    delete sink->renderer;

    // Chain up
    G_OBJECT_CLASS(gvrs_sink_parent_class)->finalize(object);
}

GstStateChangeReturn QGstVideoRendererSink::change_state(
        GstElement *element, GstStateChange transition)
{
    GstStateChangeReturn ret =
            GST_ELEMENT_CLASS(gvrs_sink_parent_class)->change_state(element, transition);
    qCDebug(qLcGstVideoRenderer) << "QGstVideoRenderer::change_state:" << transition << ret;
    return ret;
}

GstCaps *QGstVideoRendererSink::get_caps(GstBaseSink *base, GstCaps *filter)
{
    VO_SINK(base);

    QGstCaps caps = sink->renderer->caps();
    if (filter)
        caps = QGstCaps(gst_caps_intersect(caps.caps(), filter), QGstCaps::HasRef);

    return caps.release();
}

gboolean QGstVideoRendererSink::set_caps(GstBaseSink *base, GstCaps *gcaps)
{
    VO_SINK(base);
    auto caps = QGstCaps(gcaps, QGstCaps::NeedsRef);

    qCDebug(qLcGstVideoRenderer) << "set_caps:" << caps;

    if (!caps) {
        sink->renderer->stop();
        return TRUE;
    }

    return sink->renderer->start(caps);
}

gboolean QGstVideoRendererSink::propose_allocation(GstBaseSink *base, GstQuery *query)
{
    VO_SINK(base);
    return sink->renderer->proposeAllocation(query);
}

gboolean QGstVideoRendererSink::stop(GstBaseSink *base)
{
    VO_SINK(base);
    sink->renderer->stop();
    return TRUE;
}

gboolean QGstVideoRendererSink::unlock(GstBaseSink *base)
{
    VO_SINK(base);
    sink->renderer->unlock();
    return TRUE;
}

GstFlowReturn QGstVideoRendererSink::show_frame(GstVideoSink *base, GstBuffer *buffer)
{
    VO_SINK(base);
    return sink->renderer->render(buffer);
}

gboolean QGstVideoRendererSink::query(GstBaseSink *base, GstQuery *query)
{
    VO_SINK(base);
    if (sink->renderer->query(query))
        return TRUE;

    return GST_BASE_SINK_CLASS(gvrs_sink_parent_class)->query(base, query);
}

gboolean QGstVideoRendererSink::event(GstBaseSink *base, GstEvent *event)
{
    VO_SINK(base);
    sink->renderer->gstEvent(event);
    return GST_BASE_SINK_CLASS(gvrs_sink_parent_class)->event(base, event);
}

QGstVideoRendererSinkElement::QGstVideoRendererSinkElement(QGstVideoRendererSink *element,
                                                           RefMode mode)
    : QGstBaseSink{
          qGstCheckedCast<GstBaseSink>(element),
          mode,
      }
{
}

void QGstVideoRendererSinkElement::setActive(bool isActive)
{
    qGstVideoRendererSink()->renderer->setActive(isActive);
}

QGstVideoRendererSink *QGstVideoRendererSinkElement::qGstVideoRendererSink() const
{
    return reinterpret_cast<QGstVideoRendererSink *>(element());
}

QT_END_NAMESPACE

// NOLINTEND(readability-convert-member-functions-to-static)