// Copyright (C) 2021 The Qt Company Ltd.
// Copyright (C) 2016 Research In Motion
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
#include "qquickvideooutput_p.h"

#include <private/qvideooutputorientationhandler_p.h>
#include <private/qvideoframetexturepool_p.h>
#include <QtMultimedia/qmediaplayer.h>
#include <QtMultimedia/qmediacapturesession.h>
#include <private/qfactoryloader_p.h>
#include <QtCore/qloggingcategory.h>
#include <QtQuick/QQuickWindow>
#include <private/qquickwindow_p.h>
#include <private/qmultimediautils_p.h>
#include <qsgvideonode_p.h>
#include <QtCore/qrunnable.h>

QT_BEGIN_NAMESPACE

static Q_LOGGING_CATEGORY(qLcVideo, "qt.multimedia.video")

namespace {

inline bool qIsDefaultAspect(int o)
{
    return (o % 180) == 0;
}

inline bool qIsDefaultAspect(QtVideo::Rotation rotation)
{
    return qIsDefaultAspect(qToUnderlying(rotation));
}
}

/*!
    \qmltype VideoOutput
    //! \nativetype QQuickVideoOutput
    \inherits Item
    \brief Render video or camera viewfinder.

    \ingroup multimedia_qml
    \ingroup multimedia_video_qml
    \inqmlmodule QtMultimedia

    \qml

    Rectangle {
        width: 800
        height: 600
        color: "black"

        MediaPlayer {
            id: player
            source: "file://video.webm"
            videoOutput: videoOutput
        }

        VideoOutput {
            id: videoOutput
            anchors.fill: parent
        }
    }

    \endqml

    The VideoOutput item supports untransformed, stretched, and uniformly scaled video presentation.
    For a description of stretched and uniformly scaled presentation, see the \l fillMode property
    description.

    \sa MediaPlayer, Camera

    \omit
    \section1 Screen Saver

    If it is likely that an application will be playing video for an extended
    period of time without user interaction, it may be necessary to disable
    the platform's screen saver. The \l ScreenSaver (from \l QtSystemInfo)
    may be used to disable the screen saver in this fashion:

    \qml
    import QtSystemInfo

    ScreenSaver { screenSaverEnabled: false }
    \endqml
    \endomit
*/

// TODO: Restore Qt System Info docs when the module is released

/*!
    \internal
    \class QQuickVideoOutput
    \brief The QQuickVideoOutput class provides a video output item.
*/

QQuickVideoOutput::QQuickVideoOutput(QQuickItem *parent) :
    QQuickItem(parent)
{
    setFlag(ItemHasContents, true);

    m_sink = new QQuickVideoSink(this);
    qRegisterMetaType<QVideoFrameFormat>();

    // TODO: investigate if we have any benefit of setting frame in the source thread
    connect(m_sink, &QVideoSink::videoFrameChanged, this,
            [this](const QVideoFrame &frame) {
                setFrame(frame);
            },
            Qt::DirectConnection);

    initRhiForSink();
}

QQuickVideoOutput::~QQuickVideoOutput()
{
}

/*!
    \qmlproperty object QtMultimedia::VideoOutput::videoSink

    This property holds the underlying C++ QVideoSink object that is used
    to render the video frames to this VideoOutput element.

    Normal usage of VideoOutput from QML should not require using this property.
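
    For example, the sink could be handed to C++ code for further processing.
    The following sketch assumes a hypothetical \c frameProcessor context
    object, exposed from C++, that accepts a QVideoSink; it is shown for
    illustration only:

    \qml
    VideoOutput {
        id: videoOutput
        anchors.fill: parent
        // frameProcessor is a hypothetical context property exposed from C++
        Component.onCompleted: frameProcessor.videoSink = videoOutput.videoSink
    }
    \endqml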
*/

QVideoSink *QQuickVideoOutput::videoSink() const
{
    return m_sink;
}

/*!
    \qmlproperty enumeration QtMultimedia::VideoOutput::fillMode

    Set this property to define how the video is scaled to fit the target area.

    \list
    \li Stretch - the video is scaled to fit
    \li PreserveAspectFit - the video is scaled uniformly to fit without cropping
    \li PreserveAspectCrop - the video is scaled uniformly to fill, cropping if necessary
    \endlist

    The default fill mode is PreserveAspectFit.
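
    For example, a minimal sketch of a VideoOutput that always covers its
    item, cropping the video if necessary (assuming a MediaPlayer or Camera
    feeds it), could look like this:

    \qml
    VideoOutput {
        anchors.fill: parent
        fillMode: VideoOutput.PreserveAspectCrop
    }
    \endqml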
*/

QQuickVideoOutput::FillMode QQuickVideoOutput::fillMode() const
{
    return FillMode(m_aspectRatioMode);
}

void QQuickVideoOutput::setFillMode(FillMode mode)
{
    if (mode == fillMode())
        return;

    m_aspectRatioMode = Qt::AspectRatioMode(mode);

    m_geometryDirty = true;
    update();

    emit fillModeChanged(mode);
}

void QQuickVideoOutput::_q_newFrame(QSize size)
{
    update();

    size = qRotatedFrameSize(size, m_frameDisplayingRotation);

    if (m_nativeSize != size) {
        m_nativeSize = size;

        m_geometryDirty = true;

        setImplicitWidth(size.width());
        setImplicitHeight(size.height());

        emit sourceRectChanged();
    }
}

/* Based on fill mode and our size, figure out the source/dest rects */
void QQuickVideoOutput::_q_updateGeometry()
{
    const QRectF rect(0, 0, width(), height());
    const QRectF absoluteRect(x(), y(), width(), height());

    if (!m_geometryDirty && m_lastRect == absoluteRect)
        return;

    QRectF oldContentRect(m_contentRect);

    m_geometryDirty = false;
    m_lastRect = absoluteRect;

    const auto fill = m_aspectRatioMode;
    if (m_nativeSize.isEmpty()) {
        // This is necessary for the item to receive the
        // first paint event and configure the video surface.
        m_contentRect = rect;
    } else if (fill == Qt::IgnoreAspectRatio) {
        m_contentRect = rect;
    } else {
        QSizeF scaled = m_nativeSize;
        scaled.scale(rect.size(), fill);

        m_contentRect = QRectF(QPointF(), scaled);
        m_contentRect.moveCenter(rect.center());
    }

    updateGeometry();

    if (m_contentRect != oldContentRect)
        emit contentRectChanged();
}

/*!
    \qmlproperty int QtMultimedia::VideoOutput::orientation

    This property determines the angle, in degrees, by which the displayed
    video is rotated clockwise in video coordinates, where the Y axis points
    downwards on the display.

    The orientation change affects the mapping of coordinates from the source to the viewport.

    Only multiples of \c 90 degrees are supported, that is 0, 90, -90, 180, 270, and so on;
    other values are ignored.

    In some cases, the orientation of the source video stream needs to be
    corrected. This includes sources like a camera viewfinder, where the
    displayed viewfinder should match reality, no matter what rotation
    the rest of the user interface has.

    We recommend using this property to compensate for a user interface
    rotation, or to align the output view with other application
    requirements.

    The default value is \c 0.
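
    For example, a minimal sketch that compensates for a user interface
    rotated by 90 degrees could look like this:

    \qml
    VideoOutput {
        anchors.fill: parent
        orientation: -90
    }
    \endqml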
*/
int QQuickVideoOutput::orientation() const
{
    return m_orientation;
}

void QQuickVideoOutput::setOrientation(int orientation)
{
    // Make sure it's a multiple of 90.
    if (orientation % 90)
        return;

    // If there's no actual change, return
    if (m_orientation == orientation)
        return;

    // If the new orientation has the same effect
    // as the old one, don't update the video node stuff
    if (qVideoRotationFromDegrees(orientation - m_orientation) == QtVideo::Rotation::None) {
        m_orientation = orientation;
        emit orientationChanged();
        return;
    }

    m_geometryDirty = true;

    // Otherwise, a new orientation
    // See if we need to change aspect ratio orientation too
    bool oldAspect = qIsDefaultAspect(m_orientation);
    bool newAspect = qIsDefaultAspect(orientation);

    m_orientation = orientation;

    {
        QMutexLocker lock(&m_frameMutex);
        m_frameDisplayingRotation = qNormalizedFrameTransformation(m_frame, m_orientation).rotation;
    }

    if (oldAspect != newAspect) {
        m_nativeSize.transpose();

        setImplicitWidth(m_nativeSize.width());
        setImplicitHeight(m_nativeSize.height());

        // Source rectangle does not change for orientation
    }

    update();
    emit orientationChanged();
}

/*!
    \qmlproperty rectangle QtMultimedia::VideoOutput::contentRect

    This property holds the item coordinates of the area that
    would contain video to render. With certain fill modes,
    this rectangle will be larger than the visible area of the
    \c VideoOutput.

    This property is useful when other coordinates are specified
    in terms of the source dimensions - this applies to relative
    (normalized) frame coordinates in the range of 0 to 1.0.

    Areas outside this will be transparent.
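
    For example, a point given in normalized frame coordinates could be
    mapped into item coordinates roughly like this (a sketch; \c nx and
    \c ny are assumed to be values between 0 and 1):

    \qml
    VideoOutput {
        id: videoOutput
        anchors.fill: parent

        function mapNormalizedPoint(nx, ny) {
            return Qt.point(contentRect.x + nx * contentRect.width,
                            contentRect.y + ny * contentRect.height)
        }
    }
    \endqml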
*/
QRectF QQuickVideoOutput::contentRect() const
{
    return m_contentRect;
}

/*!
    \qmlproperty rectangle QtMultimedia::VideoOutput::sourceRect

    This property holds the area of the source video
    content that is considered for rendering. The
    values are in source pixel coordinates, adjusted for
    the source's pixel aspect ratio.

    Note that typically the top left corner of this rectangle
    will be \c {0,0} while the width and height will be the
    width and height of the input content. These values only
    differ if the video source has a viewport set.

    The orientation setting does not affect this rectangle.

    \sa QVideoFrameFormat::viewport()
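
    For example, the source resolution could be shown next to the video
    (a minimal sketch, assuming a VideoOutput with the id \c videoOutput):

    \qml
    Text {
        text: videoOutput.sourceRect.width + " x " + videoOutput.sourceRect.height
    }
    \endqml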
*/
QRectF QQuickVideoOutput::sourceRect() const
{
    // We might have to transpose back
    QSizeF size = m_nativeSize;
    if (!size.isValid())
        return {};

    if (!qIsDefaultAspect(m_frameDisplayingRotation))
        size.transpose();

    // Take the viewport into account for the top left position.
    // m_nativeSize is already adjusted to the viewport, as it originates
    // from QVideoFrameFormat::viewport(), which includes pixel aspect ratio
    const QRectF viewport = adjustedViewport();
    Q_ASSERT(viewport.size() == size);
    return QRectF(viewport.topLeft(), size);
}

void QQuickVideoOutput::geometryChange(const QRectF &newGeometry, const QRectF &oldGeometry)
{
    Q_UNUSED(newGeometry);
    Q_UNUSED(oldGeometry);

    QQuickItem::geometryChange(newGeometry, oldGeometry);

    // Explicitly listen to geometry changes here. This is needed since changing the position does
    // not trigger a call to updatePaintNode().
    // We need to react to position changes though, as the window backend's display rect gets
    // changed in that situation.
    _q_updateGeometry();
}

void QQuickVideoOutput::_q_invalidateSceneGraph()
{
    // Invoked in the renderer thread

    if (auto texturePool = m_texturePool.lock())
        texturePool->clearTextures();
    m_sink->setRhi(nullptr);
}

void QQuickVideoOutput::_q_sceneGraphInitialized()
{
    initRhiForSink();
}

void QQuickVideoOutput::_q_afterFrameEnd()
{
    if (auto texturePool = m_texturePool.lock())
        texturePool->onFrameEndInvoked();
}

void QQuickVideoOutput::releaseResources()
{
    // Called on the gui thread when the window is closed or changed.
    initRhiForSink();
    QQuickItem::releaseResources();
}

void QQuickVideoOutput::initRhiForSink()
{
    QRhi *rhi = m_window ? QQuickWindowPrivate::get(m_window)->rhi : nullptr;
    m_sink->setRhi(rhi);
}

void QQuickVideoOutput::itemChange(QQuickItem::ItemChange change,
                                   const QQuickItem::ItemChangeData &changeData)
{
    if (change != QQuickItem::ItemSceneChange)
        return;

    if (changeData.window == m_window)
        return;
    if (m_window)
        disconnect(m_window);
    m_window = changeData.window;

    if (m_window) {
        // We want to receive the signals in the render thread
        connect(m_window, &QQuickWindow::sceneGraphInitialized, this,
                &QQuickVideoOutput::_q_sceneGraphInitialized, Qt::DirectConnection);
        connect(m_window, &QQuickWindow::sceneGraphInvalidated, this,
                &QQuickVideoOutput::_q_invalidateSceneGraph, Qt::DirectConnection);
        connect(m_window, &QQuickWindow::afterFrameEnd, this, &QQuickVideoOutput::_q_afterFrameEnd,
                Qt::DirectConnection);
    }
    initRhiForSink();
}

QSize QQuickVideoOutput::nativeSize() const
{
    return m_videoFormat.viewport().size();
}

void QQuickVideoOutput::updateGeometry()
{
    const QRectF viewport = m_videoFormat.viewport();
    const QSizeF frameSize = m_videoFormat.frameSize();
    const QRectF normalizedViewport(viewport.x() / frameSize.width(),
                                    viewport.y() / frameSize.height(),
                                    viewport.width() / frameSize.width(),
                                    viewport.height() / frameSize.height());
    const QRectF rect(0, 0, width(), height());
    if (nativeSize().isEmpty()) {
        m_renderedRect = rect;
        m_sourceTextureRect = normalizedViewport;
    } else if (m_aspectRatioMode == Qt::IgnoreAspectRatio) {
        m_renderedRect = rect;
        m_sourceTextureRect = normalizedViewport;
    } else if (m_aspectRatioMode == Qt::KeepAspectRatio) {
        m_sourceTextureRect = normalizedViewport;
        m_renderedRect = contentRect();
    } else if (m_aspectRatioMode == Qt::KeepAspectRatioByExpanding) {
        m_renderedRect = rect;
        const qreal contentHeight = contentRect().height();
        const qreal contentWidth = contentRect().width();

        // Calculate the size of the source rectangle without taking the viewport into account
        const qreal relativeOffsetLeft = -contentRect().left() / contentWidth;
        const qreal relativeOffsetTop = -contentRect().top() / contentHeight;
        const qreal relativeWidth = rect.width() / contentWidth;
        const qreal relativeHeight = rect.height() / contentHeight;

        // Now take the viewport size into account
        const qreal totalOffsetLeft = normalizedViewport.x() + relativeOffsetLeft * normalizedViewport.width();
        const qreal totalOffsetTop = normalizedViewport.y() + relativeOffsetTop * normalizedViewport.height();
        const qreal totalWidth = normalizedViewport.width() * relativeWidth;
        const qreal totalHeight = normalizedViewport.height() * relativeHeight;

        if (qIsDefaultAspect(m_frameDisplayingRotation)) {
            m_sourceTextureRect = QRectF(totalOffsetLeft, totalOffsetTop,
                                         totalWidth, totalHeight);
        } else {
            m_sourceTextureRect = QRectF(totalOffsetTop, totalOffsetLeft,
                                         totalHeight, totalWidth);
        }
    }
}

QSGNode *QQuickVideoOutput::updatePaintNode(QSGNode *oldNode,
                                            QQuickItem::UpdatePaintNodeData *data)
{
    Q_UNUSED(data);
    _q_updateGeometry();

    QSGVideoNode *videoNode = static_cast<QSGVideoNode *>(oldNode);

    QMutexLocker lock(&m_frameMutex);

    if (m_frameChanged) {
        if (videoNode && videoNode->pixelFormat() != m_frame.pixelFormat()) {
            qCDebug(qLcVideo) << "updatePaintNode: deleting old video node because frame format changed";
            delete videoNode;
            videoNode = nullptr;
        }

        if (!m_frame.isValid()) {
            qCDebug(qLcVideo) << "updatePaintNode: no frames yet";
            m_frameChanged = false;
            return nullptr;
        }

        if (!videoNode) {
            // Get a node that supports our frame. The surface is irrelevant, our
            // QSGVideoItemSurface supports (logically) anything.
            updateGeometry();
            QRhi *rhi = m_window ? QQuickWindowPrivate::get(m_window)->rhi : nullptr;
            videoNode = new QSGVideoNode(this, m_videoFormat, rhi);
            m_texturePool = videoNode->texturePool();
            qCDebug(qLcVideo) << "updatePaintNode: Video node created. Handle type:" << m_frame.handleType();
        }
    }

    if (!videoNode) {
        m_frameChanged = false;
        m_frame = QVideoFrame();
        return nullptr;
    }

    if (m_frameChanged) {
        videoNode->setCurrentFrame(m_frame);

        updateHdr(videoNode);

        // don't keep the frame for more than really necessary
        m_frameChanged = false;
        m_frame = QVideoFrame();
    }

    videoNode->setTexturedRectGeometry(
            m_renderedRect, m_sourceTextureRect,
            VideoTransformation{ qVideoRotationFromDegrees(orientation()), m_mirrored });

    return videoNode;
}

void QQuickVideoOutput::updateHdr(QSGVideoNode *videoNode)
{
    auto *videoOutputWindow = window();
    if (!videoOutputWindow)
        return;

    auto *swapChain = videoOutputWindow->swapChain();
    if (!swapChain)
        return;

    const auto requiredSwapChainFormat = qGetRequiredSwapChainFormat(m_frame.surfaceFormat());
    if (qShouldUpdateSwapChainFormat(swapChain, requiredSwapChainFormat)) {
        auto *recreateSwapChainJob = QRunnable::create([swapChain, requiredSwapChainFormat]() {
            swapChain->destroy();
            swapChain->setFormat(requiredSwapChainFormat);
            swapChain->createOrResize();
        });

        // Even though the 'recreate swap chain' job is scheduled for the current frame the
        // effect will be visible only starting from the next frame since the recreation would
        // happen after the actual swap.
        videoOutputWindow->scheduleRenderJob(recreateSwapChainJob, QQuickWindow::AfterSwapStage);
    }

    videoNode->setSurfaceFormat(swapChain->format());
    videoNode->setHdrInfo(swapChain->hdrInfo());
}

QRectF QQuickVideoOutput::adjustedViewport() const
{
    return m_videoFormat.viewport();
}

void QQuickVideoOutput::setFrame(const QVideoFrame &frame)
{
    {
        QMutexLocker lock(&m_frameMutex);

        m_videoFormat = frame.surfaceFormat();
        m_frame = frame;
        m_frameDisplayingRotation = qNormalizedFrameTransformation(frame, m_orientation).rotation;
        m_frameChanged = true;
    }

    QMetaObject::invokeMethod(this, &QQuickVideoOutput::_q_newFrame, frame.size());
}

QT_END_NAMESPACE

#include "moc_qquickvideooutput_p.cpp"