1 | // Copyright (C) 2016 The Qt Company Ltd. |
---|---|
2 | // SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only |
3 | |
4 | #include "qvideoframeformat.h" |
5 | #include "qvideotexturehelper_p.h" |
6 | #include "qvideotransformation_p.h" |
7 | |
8 | #include <qdebug.h> |
9 | #include <qlist.h> |
10 | #include <qmetatype.h> |
11 | #include <qpair.h> |
12 | #include <qvariant.h> |
13 | #include <qmatrix4x4.h> |
14 | |
// Pulls the Qt Multimedia shader .qrc resources into the binary.
// Q_INIT_RESOURCE must be invoked from a function outside any namespace,
// which is why this helper exists at file scope.
static void initResource() {
    Q_INIT_RESOURCE(qtmultimedia_shaders);
}
18 | |
19 | QT_BEGIN_NAMESPACE |
20 | |
21 | class QVideoFrameFormatPrivate : public QSharedData |
22 | { |
23 | public: |
24 | QVideoFrameFormatPrivate() = default; |
25 | |
26 | QVideoFrameFormatPrivate( |
27 | const QSize &size, |
28 | QVideoFrameFormat::PixelFormat format) |
29 | : pixelFormat(format) |
30 | , frameSize(size) |
31 | , viewport(QPoint(0, 0), size) |
32 | { |
33 | } |
34 | |
35 | bool operator ==(const QVideoFrameFormatPrivate &other) const |
36 | { |
37 | if (pixelFormat == other.pixelFormat && scanLineDirection == other.scanLineDirection |
38 | && frameSize == other.frameSize && viewport == other.viewport |
39 | && frameRatesEqual(r1: frameRate, r2: other.frameRate) && colorSpace == other.colorSpace |
40 | && transformation == other.transformation) |
41 | return true; |
42 | |
43 | return false; |
44 | } |
45 | |
46 | inline static bool frameRatesEqual(qreal r1, qreal r2) |
47 | { |
48 | return qAbs(t: r1 - r2) <= 0.00001 * qMin(a: qAbs(t: r1), b: qAbs(t: r2)); |
49 | } |
50 | |
51 | QVideoFrameFormat::PixelFormat pixelFormat = QVideoFrameFormat::Format_Invalid; |
52 | QVideoFrameFormat::Direction scanLineDirection = QVideoFrameFormat::TopToBottom; |
53 | QSize frameSize; |
54 | QVideoFrameFormat::ColorSpace colorSpace = QVideoFrameFormat::ColorSpace_Undefined; |
55 | QVideoFrameFormat::ColorTransfer colorTransfer = QVideoFrameFormat::ColorTransfer_Unknown; |
56 | QVideoFrameFormat::ColorRange colorRange = QVideoFrameFormat::ColorRange_Unknown; |
57 | QRect viewport; |
58 | float frameRate = 0.0; |
59 | float maxLuminance = -1.; |
60 | VideoTransformation transformation; |
61 | }; |
62 | |
// Out-of-line destructor specialization for
// QExplicitlySharedDataPointer<QVideoFrameFormatPrivate>.
QT_DEFINE_QESDP_SPECIALIZATION_DTOR(QVideoFrameFormatPrivate);
64 | |
65 | /*! |
66 | \class QVideoFrameFormat |
67 | \brief The QVideoFrameFormat class specifies the stream format of a video presentation |
68 | surface. |
69 | \inmodule QtMultimedia |
70 | |
71 | \ingroup multimedia |
72 | \ingroup multimedia_video |
73 | |
74 | A video sink presents a stream of video frames. QVideoFrameFormat describes the type of |
75 | the frames and determines how they should be presented. |
76 | |
77 | The core properties of a video stream required to set up a video sink are the pixel format |
78 | given by pixelFormat(), and the frame dimensions given by frameSize(). |
79 | |
80 | The region of a frame that is actually displayed on a video surface is given by the viewport(). |
81 | A stream may have a viewport less than the entire region of a frame to allow for videos smaller |
82 | than the nearest optimal size of a video frame. For example the width of a frame may be |
83 | extended so that the start of each scan line is eight byte aligned. |
84 | |
85 | Other common properties are the scanLineDirection(), frameRate() and the yCbCrColorSpace(). |
86 | */ |
87 | |
88 | /*! |
89 | \enum QVideoFrameFormat::PixelFormat |
90 | |
91 | Enumerates video data types. |
92 | |
93 | \value Format_Invalid |
94 | The frame is invalid. |
95 | |
96 | \value Format_ARGB8888 |
97 | The frame is stored using a ARGB format with 8 bits per component. |
98 | |
99 | \value Format_ARGB8888_Premultiplied |
100 | The frame stored using a premultiplied ARGB format with 8 bits per component. |
101 | |
102 | \value Format_XRGB8888 |
103 | The frame stored using a 32 bits per pixel RGB format (0xff, R, G, B). |
104 | |
105 | \value Format_BGRA8888 |
106 | The frame is stored using a 32-bit BGRA format (0xBBGGRRAA). |
107 | |
108 | \value Format_BGRA8888_Premultiplied |
109 | The frame is stored using a premultiplied 32bit BGRA format. |
110 | |
111 | \value Format_ABGR8888 |
112 | The frame is stored using a 32-bit ABGR format (0xAABBGGRR). |
113 | |
114 | \value Format_XBGR8888 |
115 | The frame is stored using a 32-bit BGR format (0xffBBGGRR). |
116 | |
117 | \value Format_RGBA8888 |
118 | The frame is stored in memory as the bytes R, G, B, A/X, with R at the lowest address and A/X at the highest address. |
119 | |
120 | \value Format_BGRX8888 |
121 | The frame is stored in format 32-bit BGRx format, [31:0] B:G:R:x 8:8:8:8 little endian |
122 | |
123 | \value Format_RGBX8888 |
124 | The frame is stored in memory as the bytes R, G, B, A/X, with R at the lowest address and A/X at the highest address. |
125 | |
126 | \value Format_AYUV |
127 | The frame is stored using a packed 32-bit AYUV format (0xAAYYUUVV). |
128 | |
129 | \value Format_AYUV_Premultiplied |
130 | The frame is stored using a packed premultiplied 32-bit AYUV format (0xAAYYUUVV). |
131 | |
132 | \value Format_YUV420P |
133 | The frame is stored using an 8-bit per component planar YUV format with the U and V planes |
134 | horizontally and vertically sub-sampled, i.e. the height and width of the U and V planes are |
135 | half that of the Y plane. |
136 | |
137 | \value Format_YUV422P |
138 | The frame is stored using an 8-bit per component planar YUV format with the U and V planes |
139 | horizontally sub-sampled, i.e. the width of the U and V planes are |
140 | half that of the Y plane, and height of U and V planes is the same as Y. |
141 | |
142 | \value Format_YV12 |
143 | The frame is stored using an 8-bit per component planar YVU format with the V and U planes |
144 | horizontally and vertically sub-sampled, i.e. the height and width of the V and U planes are |
145 | half that of the Y plane. |
146 | |
147 | \value Format_UYVY |
148 | The frame is stored using an 8-bit per component packed YUV format with the U and V planes |
149 | horizontally sub-sampled (U-Y-V-Y), i.e. two horizontally adjacent pixels are stored as a 32-bit |
150 | macropixel which has a Y value for each pixel and common U and V values. |
151 | |
152 | \value Format_YUYV |
153 | The frame is stored using an 8-bit per component packed YUV format with the U and V planes |
154 | horizontally sub-sampled (Y-U-Y-V), i.e. two horizontally adjacent pixels are stored as a 32-bit |
155 | macropixel which has a Y value for each pixel and common U and V values. |
156 | |
157 | \value Format_NV12 |
158 | The frame is stored using an 8-bit per component semi-planar YUV format with a Y plane (Y) |
159 | followed by a horizontally and vertically sub-sampled, packed UV plane (U-V). |
160 | |
161 | \value Format_NV21 |
162 | The frame is stored using an 8-bit per component semi-planar YUV format with a Y plane (Y) |
163 | followed by a horizontally and vertically sub-sampled, packed VU plane (V-U). |
164 | |
165 | \value Format_IMC1 |
166 | The frame is stored using an 8-bit per component planar YUV format with the U and V planes |
167 | horizontally and vertically sub-sampled. This is similar to the Format_YUV420P type, except |
168 | that the bytes per line of the U and V planes are padded out to the same stride as the Y plane. |
169 | |
170 | \value Format_IMC2 |
171 | The frame is stored using an 8-bit per component planar YUV format with the U and V planes |
172 | horizontally and vertically sub-sampled. This is similar to the Format_YUV420P type, except |
173 | that the lines of the U and V planes are interleaved, i.e. each line of U data is followed by a |
174 | line of V data creating a single line of the same stride as the Y data. |
175 | |
176 | \value Format_IMC3 |
177 | The frame is stored using an 8-bit per component planar YVU format with the V and U planes |
178 | horizontally and vertically sub-sampled. This is similar to the Format_YV12 type, except that |
179 | the bytes per line of the V and U planes are padded out to the same stride as the Y plane. |
180 | |
181 | \value Format_IMC4 |
182 | The frame is stored using an 8-bit per component planar YVU format with the V and U planes |
183 | horizontally and vertically sub-sampled. This is similar to the Format_YV12 type, except that |
184 | the lines of the V and U planes are interleaved, i.e. each line of V data is followed by a line |
185 | of U data creating a single line of the same stride as the Y data. |
186 | |
187 | \value Format_P010 |
188 | The frame is stored using a 16bit per component semi-planar YUV format with a Y plane (Y) |
189 | followed by a horizontally and vertically sub-sampled, packed UV plane (U-V). Only the 10 most |
190 | significant bits of each component are being used. |
191 | |
192 | \value Format_P016 |
193 | The frame is stored using a 16bit per component semi-planar YUV format with a Y plane (Y) |
194 | followed by a horizontally and vertically sub-sampled, packed UV plane (U-V). |
195 | |
196 | \value Format_Y8 |
197 | The frame is stored using an 8-bit greyscale format. |
198 | |
199 | \value Format_Y16 |
200 | The frame is stored using a 16-bit linear greyscale format. Little endian. |
201 | |
202 | \value Format_Jpeg |
203 | The frame is stored in compressed Jpeg format. |
204 | |
205 | \value Format_SamplerExternalOES |
206 | The frame is stored in external OES texture format. This is currently only being used on Android. |
207 | |
208 | \value Format_SamplerRect |
209 | The frame is stored in rectangle texture format (GL_TEXTURE_RECTANGLE). This is only being used on |
210 | macOS with an OpenGL based Rendering Hardware interface. The underlying pixel format stored in the |
211 | texture is Format_BGRA8888. |
212 | |
213 | \value Format_YUV420P10 |
214 | Similar to YUV420, but uses 16bits per component, 10 of those significant. |
215 | */ |
216 | |
217 | /*! |
218 | \enum QVideoFrameFormat::Direction |
219 | |
220 | Enumerates the layout direction of video scan lines. |
221 | |
222 | \value TopToBottom Scan lines are arranged from the top of the frame to the bottom. |
223 | \value BottomToTop Scan lines are arranged from the bottom of the frame to the top. |
224 | */ |
225 | |
226 | /*! |
227 | \enum QVideoFrameFormat::YCbCrColorSpace |
228 | |
229 | \deprecated Use QVideoFrameFormat::ColorSpace instead. |
230 | |
231 | Enumerates the Y'CbCr color space of video frames. |
232 | |
233 | \value YCbCr_Undefined |
234 | No color space is specified. |
235 | |
236 | \value YCbCr_BT601 |
237 | A Y'CbCr color space defined by ITU-R recommendation BT.601 |
238 | with Y value range from 16 to 235, and Cb/Cr range from 16 to 240. |
239 | Used mostly by older videos that were targeting CRT displays. |
240 | |
241 | \value YCbCr_BT709 |
242 | A Y'CbCr color space defined by ITU-R BT.709 with the same values range as YCbCr_BT601. |
243 | The most commonly used color space today. |
244 | |
245 | \value YCbCr_xvYCC601 |
246 | This value is deprecated. Please check the \l ColorRange instead. |
247 | The BT.601 color space with the value range extended to 0 to 255. |
248 | It is backward compatible with BT.601 and uses values outside BT.601 range to represent a |
249 | wider range of colors. |
250 | |
251 | \value YCbCr_xvYCC709 |
252 | This value is deprecated. Please check the \l ColorRange instead. |
253 | The BT.709 color space with the value range extended to 0 to 255. |
254 | |
255 | \value YCbCr_JPEG |
256 | The full range Y'CbCr color space used in most JPEG files. |
257 | |
258 | \value YCbCr_BT2020 |
259 | The color space defined by ITU-R BT.2020. Used mainly for HDR videos. |
260 | */ |
261 | |
262 | |
263 | /*! |
264 | \enum QVideoFrameFormat::ColorSpace |
265 | |
266 | Enumerates the color space of video frames. |
267 | |
268 | \value ColorSpace_Undefined |
269 | No color space is specified. |
270 | |
271 | \value ColorSpace_BT601 |
272 | A color space defined by ITU-R recommendation BT.601 |
273 | with Y value range from 16 to 235, and Cb/Cr range from 16 to 240. |
274 | Used mostly by older videos that were targeting CRT displays. |
275 | |
276 | \value ColorSpace_BT709 |
277 | A color space defined by ITU-R BT.709 with the same values range as ColorSpace_BT601. |
278 | The most commonly used color space today. |
279 | |
280 | \value ColorSpace_AdobeRgb |
281 | The full range YUV color space used in most JPEG files. |
282 | |
283 | \value ColorSpace_BT2020 |
284 | The color space defined by ITU-R BT.2020. Used mainly for HDR videos. |
285 | */ |
286 | |
287 | /*! |
288 | \enum QVideoFrameFormat::ColorTransfer |
289 | |
290 | \value ColorTransfer_Unknown |
291 | The color transfer function is unknown. |
292 | |
293 | \value ColorTransfer_BT709 |
294 | Color values are encoded according to BT709. See also https://www.itu.int/rec/R-REC-BT.709/en. |
295 | This is close to, but not identical to a gamma curve of 2.2, and the same transfer curve as is |
296 | used in sRGB. |
297 | |
298 | \value ColorTransfer_BT601 |
299 | Color values are encoded according to BT601. See also https://www.itu.int/rec/R-REC-BT.601/en. |
300 | |
301 | \value ColorTransfer_Linear |
302 | Color values are linear |
303 | |
304 | \value ColorTransfer_Gamma22 |
305 | Color values are encoded with a gamma of 2.2 |
306 | |
307 | \value ColorTransfer_Gamma28 |
308 | Color values are encoded with a gamma of 2.8 |
309 | |
310 | \value ColorTransfer_ST2084 |
311 | Color values are encoded using SMPTE ST 2084. This transfer function is the most common HDR |
312 | transfer function and often called the 'perceptual quantizer'. See also https://www.itu.int/rec/R-REC-BT.2100 |
313 | and https://en.wikipedia.org/wiki/Perceptual_quantizer. |
314 | |
315 | |
316 | \value ColorTransfer_STD_B67 |
317 | Color values are encoded using ARIB STD B67. This transfer function is also often referred to as 'hybrid log gamma'. |
318 | See also https://www.itu.int/rec/R-REC-BT.2100 and https://en.wikipedia.org/wiki/Hybrid_log–gamma. |
319 | */ |
320 | |
321 | /*! |
322 | \enum QVideoFrameFormat::ColorRange |
323 | |
324 | Describes the color range used by the video data. Video data usually comes in either full |
325 | color range, where all values are being used, or a more limited range traditionally used in |
326 | YUV video formats, where a subset of all values is being used. |
327 | |
328 | \value ColorRange_Unknown |
329 | The color range of the video is unknown. |
330 | |
331 | \value ColorRange_Video |
332 | |
333 | The color range traditionally used by most YUV video formats. For 8 bit formats, the Y component is |
334 | limited to values between 16 and 235. The U and V components are limited to values between 16 and 240 |
335 | |
336 | For higher bit depths multiply these values with 2^(depth-8). |
337 | |
338 | \value ColorRange_Full |
339 | |
340 | Full color range. All values from 0 to 2^depth - 1 are valid. |
341 | */ |
342 | |
343 | /*! |
344 | Constructs a null video stream format. |
345 | */ |
QVideoFrameFormat::QVideoFrameFormat()
    : d(new QVideoFrameFormatPrivate)
{
    // Make sure the shader resources are registered before any format is used.
    initResource();
}
351 | |
352 | /*! |
353 | Constructs a video stream with the given frame \a size and pixel \a format. |
354 | */ |
QVideoFrameFormat::QVideoFrameFormat(
    const QSize& size, QVideoFrameFormat::PixelFormat format)
    : d(new QVideoFrameFormatPrivate(size, format))
{
    // The private ctor also initializes the viewport to cover the full frame.
}
360 | |
361 | /*! |
362 | Constructs a copy of \a other. |
363 | */ |
// Shallow copy: shares the implicitly shared private data with \a other.
QVideoFrameFormat::QVideoFrameFormat(const QVideoFrameFormat &other) = default;
365 | |
366 | /*! |
367 | \fn QVideoFrameFormat::QVideoFrameFormat(QVideoFrameFormat &&other) |
368 | |
369 | Constructs a QVideoFrameFormat by moving from \a other. |
370 | */ |
371 | |
372 | /*! |
373 | \fn void QVideoFrameFormat::swap(QVideoFrameFormat &other) noexcept |
374 | |
375 | Swaps the current video frame format with the \a other. |
376 | */ |
377 | |
378 | /*! |
379 | Assigns the values of \a other to this object. |
380 | */ |
// Shallow assignment: shares the implicitly shared private data.
QVideoFrameFormat &QVideoFrameFormat::operator =(const QVideoFrameFormat &other) = default;
382 | |
383 | /*! |
384 | \fn QVideoFrameFormat &QVideoFrameFormat::operator =(QVideoFrameFormat &&other) |
385 | |
386 | Moves \a other into this QVideoFrameFormat. |
387 | */ |
388 | |
389 | /*! |
390 | Destroys a video stream description. |
391 | */ |
// The QExplicitlySharedDataPointer member releases the private data.
QVideoFrameFormat::~QVideoFrameFormat() = default;
393 | |
394 | /*! |
395 | Identifies if a video surface format has a valid pixel format and frame size. |
396 | |
397 | Returns true if the format is valid, and false otherwise. |
398 | */ |
399 | bool QVideoFrameFormat::isValid() const |
400 | { |
401 | return d->pixelFormat != Format_Invalid && d->frameSize.isValid(); |
402 | } |
403 | |
404 | /*! |
405 | Returns true if \a other is the same as this video format, and false if they are different. |
406 | */ |
407 | bool QVideoFrameFormat::operator ==(const QVideoFrameFormat &other) const |
408 | { |
409 | return d == other.d || *d == *other.d; |
410 | } |
411 | |
412 | /*! |
413 | Returns true if \a other is different to this video format, and false if they are the same. |
414 | */ |
415 | bool QVideoFrameFormat::operator !=(const QVideoFrameFormat &other) const |
416 | { |
417 | return d != other.d && !(*d == *other.d); |
418 | } |
419 | |
420 | /*! |
421 | \internal |
422 | */ |
void QVideoFrameFormat::detach()
{
    // Copy-on-write: deep-copies the shared private data if it is shared.
    d.detach();
}
427 | |
428 | /*! |
429 | Returns the pixel format of frames in a video stream. |
430 | */ |
QVideoFrameFormat::PixelFormat QVideoFrameFormat::pixelFormat() const
{
    // Const read of shared data; no detach needed.
    return d->pixelFormat;
}
435 | |
436 | /*! |
437 | Returns the dimensions of frames in a video stream. |
438 | |
439 | \sa frameWidth(), frameHeight() |
440 | */ |
QSize QVideoFrameFormat::frameSize() const
{
    // Full frame dimensions; the displayed region may be smaller (see viewport()).
    return d->frameSize;
}
445 | |
446 | /*! |
447 | Returns the width of frames in a video stream. |
448 | |
449 | \sa frameSize(), frameHeight() |
450 | */ |
int QVideoFrameFormat::frameWidth() const
{
    // Convenience for frameSize().width().
    return d->frameSize.width();
}
455 | |
456 | /*! |
457 | Returns the height of frame in a video stream. |
458 | */ |
int QVideoFrameFormat::frameHeight() const
{
    // Convenience for frameSize().height().
    return d->frameSize.height();
}
463 | |
464 | /*! |
465 | Returns the number of planes used. |
466 | This number is depending on the pixel format and is |
467 | 1 for RGB based formats, and a number between 1 and 3 for |
468 | YUV based formats. |
469 | */ |
470 | int QVideoFrameFormat::planeCount() const |
471 | { |
472 | return QVideoTextureHelper::textureDescription(format: d->pixelFormat)->nplanes; |
473 | } |
474 | |
475 | /*! |
476 | Sets the size of frames in a video stream to \a size. |
477 | |
478 | This will reset the viewport() to fill the entire frame. |
479 | */ |
480 | void QVideoFrameFormat::setFrameSize(const QSize &size) |
481 | { |
482 | detach(); |
483 | d->frameSize = size; |
484 | d->viewport = QRect(QPoint(0, 0), size); |
485 | } |
486 | |
487 | /*! |
488 | \overload |
489 | |
490 | Sets the \a width and \a height of frames in a video stream. |
491 | |
492 | This will reset the viewport() to fill the entire frame. |
493 | */ |
494 | void QVideoFrameFormat::setFrameSize(int width, int height) |
495 | { |
496 | detach(); |
497 | d->frameSize = QSize(width, height); |
498 | d->viewport = QRect(0, 0, width, height); |
499 | } |
500 | |
501 | /*! |
502 | Returns the viewport of a video stream. |
503 | |
504 | The viewport is the region of a video frame that is actually displayed. |
505 | |
506 | By default the viewport covers an entire frame. |
507 | */ |
QRect QVideoFrameFormat::viewport() const
{
    // The displayed sub-region of the frame; defaults to the whole frame.
    return d->viewport;
}
512 | |
513 | /*! |
514 | Sets the viewport of a video stream to \a viewport. |
515 | */ |
void QVideoFrameFormat::setViewport(const QRect &viewport)
{
    // Copy-on-write before mutating shared state.
    detach();
    d->viewport = viewport;
}
521 | |
522 | /*! |
523 | Returns the direction of scan lines. |
524 | */ |
QVideoFrameFormat::Direction QVideoFrameFormat::scanLineDirection() const
{
    // TopToBottom unless explicitly changed.
    return d->scanLineDirection;
}
529 | |
530 | /*! |
531 | Sets the \a direction of scan lines. |
532 | */ |
void QVideoFrameFormat::setScanLineDirection(Direction direction)
{
    // Copy-on-write before mutating shared state.
    detach();
    d->scanLineDirection = direction;
}
538 | |
539 | #if QT_DEPRECATED_SINCE(6, 8) |
540 | /*! |
541 | Returns the frame rate of a video stream in frames per second. |
542 | */ |
qreal QVideoFrameFormat::frameRate() const
{
    // Deprecated since 6.8 (see enclosing QT_DEPRECATED_SINCE guard);
    // forwards to the replacement accessor.
    return streamFrameRate();
}
547 | |
548 | /*! |
549 | Sets the frame \a rate of a video stream in frames per second. |
550 | */ |
void QVideoFrameFormat::setFrameRate(qreal rate)
{
    // Deprecated since 6.8 (see enclosing QT_DEPRECATED_SINCE guard);
    // forwards to the replacement setter.
    setStreamFrameRate(rate);
}
555 | #endif |
556 | |
557 | /*! |
558 | Returns the frame rate of a video stream in frames per second. |
559 | */ |
qreal QVideoFrameFormat::streamFrameRate() const
{
    // Stored as float internally; widened to qreal on return.
    return d->frameRate;
}
564 | |
565 | /*! |
566 | Sets the frame \a rate of a video stream in frames per second. |
567 | */ |
void QVideoFrameFormat::setStreamFrameRate(qreal rate)
{
    // Copy-on-write before mutating shared state.
    detach();
    // Note: narrowed to float, matching the private storage.
    d->frameRate = rate;
}
573 | |
574 | #if QT_DEPRECATED_SINCE(6, 4) |
575 | /*! |
576 | \deprecated Use colorSpace() instead |
577 | |
578 | Returns the Y'CbCr color space of a video stream. |
579 | */ |
QVideoFrameFormat::YCbCrColorSpace QVideoFrameFormat::yCbCrColorSpace() const
{
    // Deprecated: YCbCrColorSpace mirrors the ColorSpace enum values,
    // so a direct enum-to-enum conversion is sufficient.
    return YCbCrColorSpace(d->colorSpace);
}
584 | |
585 | /*! |
586 | \deprecated Use setColorSpace() instead |
587 | |
588 | Sets the Y'CbCr color \a space of a video stream. |
589 | It is only used with raw YUV frame types. |
590 | */ |
void QVideoFrameFormat::setYCbCrColorSpace(QVideoFrameFormat::YCbCrColorSpace space)
{
    // Copy-on-write before mutating shared state.
    detach();
    // Deprecated: converts the legacy enum to the replacement ColorSpace enum.
    d->colorSpace = ColorSpace(space);
}
596 | #endif // QT_DEPRECATED_SINCE(6, 4) |
597 | |
598 | /*! |
599 | Returns the color space of a video stream. |
600 | */ |
QVideoFrameFormat::ColorSpace QVideoFrameFormat::colorSpace() const
{
    // ColorSpace_Undefined unless explicitly set.
    return d->colorSpace;
}
605 | |
606 | /*! |
607 | Sets the \a colorSpace of a video stream. |
608 | */ |
void QVideoFrameFormat::setColorSpace(ColorSpace colorSpace)
{
    // Copy-on-write before mutating shared state.
    detach();
    d->colorSpace = colorSpace;
}
614 | |
615 | /*! |
616 | Returns the color transfer function that should be used to render the |
617 | video stream. |
618 | */ |
QVideoFrameFormat::ColorTransfer QVideoFrameFormat::colorTransfer() const
{
    // ColorTransfer_Unknown unless explicitly set.
    return d->colorTransfer;
}
623 | |
624 | /*! |
625 | Sets the color transfer function that should be used to render the |
626 | video stream to \a colorTransfer. |
627 | */ |
void QVideoFrameFormat::setColorTransfer(ColorTransfer colorTransfer)
{
    // Copy-on-write before mutating shared state.
    detach();
    d->colorTransfer = colorTransfer;
}
633 | |
634 | /*! |
635 | Returns the color range that should be used to render the |
636 | video stream. |
637 | */ |
QVideoFrameFormat::ColorRange QVideoFrameFormat::colorRange() const
{
    // ColorRange_Unknown unless explicitly set.
    return d->colorRange;
}
642 | |
643 | /*! |
644 | Sets the color transfer range that should be used to render the |
645 | video stream to \a range. |
646 | */ |
void QVideoFrameFormat::setColorRange(ColorRange range)
{
    // Copy-on-write before mutating shared state.
    detach();
    d->colorRange = range;
}
652 | |
653 | /*! |
654 | Returns \c true if the surface is mirrored around its vertical axis. |
655 | |
656 | Transformations of \c QVideoFrameFormat, specifically, |
657 | rotation and mirroring, can be determined by the orientation of |
658 | the camera sensor, camera settings, or the orientation of |
659 | the video stream. |
660 | |
661 | Mirroring is applied after rotation. |
662 | |
663 | \note The mirroring here differs from QImage::mirrored, as a vertically mirrored QImage |
664 | will be mirrored around its x-axis. |
665 | |
666 | \since 5.11 |
667 | */ |
bool QVideoFrameFormat::isMirrored() const
{
    // Member name is spelled "mirrorred" in VideoTransformation (project header);
    // kept as-is. Mirroring is applied after rotation per the class docs.
    return d->transformation.mirrorredHorizontallyAfterRotation;
}
672 | |
673 | /*! |
674 | Sets if the surface is \a mirrored around its vertical axis. |
675 | |
676 | Transformations of \c QVideoFrameFormat, specifically, |
677 | rotation and mirroring, can be determined by the orientation of |
678 | the camera sensor, camera settings, or the orientation of |
679 | the video stream. |
680 | |
681 | Mirroring is applied after rotation. |
682 | |
683 | Default value is \c false. |
684 | |
685 | \note The mirroring here differs from QImage::mirrored, as a vertically mirrored QImage |
686 | will be mirrored around its x-axis. |
687 | |
688 | \since 5.11 |
689 | */ |
void QVideoFrameFormat::setMirrored(bool mirrored)
{
    // Copy-on-write before mutating shared state.
    detach();
    d->transformation.mirrorredHorizontallyAfterRotation = mirrored;
}
695 | |
696 | /*! |
697 | Returns the angle by which the surface is rotated clockwise. |
698 | |
699 | Transformations of \c QVideoFrameFormat, specifically, |
700 | rotation and mirroring, can be determined by the orientation of |
701 | the camera sensor, camera settings, or the orientation of |
702 | the video stream. |
703 | |
704 | Rotation is applied before mirroring. |
705 | */ |
QtVideo::Rotation QVideoFrameFormat::rotation() const
{
    // Rotation is applied before mirroring per the class docs.
    return d->transformation.rotation;
}
710 | |
711 | /*! |
712 | Sets the \a angle by which the surface is rotated clockwise. |
713 | |
714 | Transformations of \c QVideoFrameFormat, specifically, |
715 | rotation and mirroring, can be determined by the orientation of |
716 | the camera sensor, camera settings, or the orientation of |
717 | the video stream. |
718 | |
719 | Rotation is applied before mirroring. |
720 | |
721 | Default value is \c QtVideo::Rotation::None. |
722 | */ |
void QVideoFrameFormat::setRotation(QtVideo::Rotation angle)
{
    // Copy-on-write before mutating shared state.
    detach();
    d->transformation.rotation = angle;
}
728 | |
729 | /*! |
730 | \internal |
731 | */ |
732 | QString QVideoFrameFormat::vertexShaderFileName() const |
733 | { |
734 | return QVideoTextureHelper::vertexShaderFileName(format: *this); |
735 | } |
736 | |
737 | /*! |
738 | \internal |
739 | */ |
740 | QString QVideoFrameFormat::fragmentShaderFileName() const |
741 | { |
742 | return QVideoTextureHelper::fragmentShaderFileName(format: *this); |
743 | } |
744 | |
745 | /*! |
746 | \internal |
747 | */ |
748 | void QVideoFrameFormat::updateUniformData(QByteArray *dst, const QVideoFrame &frame, const QMatrix4x4 &transform, float opacity) const |
749 | { |
750 | QVideoTextureHelper::updateUniformData(dst, format: *this, frame, transform, opacity); |
751 | } |
752 | |
753 | /*! |
754 | \internal |
755 | |
756 | The maximum luminance in nits as set by the HDR metadata. If the video doesn't have meta data, the returned value depends on the |
757 | maximum that can be encoded by the transfer function. |
758 | */ |
float QVideoFrameFormat::maxLuminance() const
{
    // A non-positive stored value means no HDR metadata was set; derive a
    // maximum from the transfer function instead.
    if (d->maxLuminance <= 0) {
        if (d->colorTransfer == ColorTransfer_ST2084)
            return 10000.; // ST2084 can encode up to 10000 nits
        if (d->colorTransfer == ColorTransfer_STD_B67)
            return 1500.; // STD_B67 can encode up to 1200 nits, use a bit more for some headroom
        return 100; // SDR
    }
    return d->maxLuminance;
}
770 | /*! |
771 | Sets the maximum luminance to the given value, \a lum. |
772 | */ |
void QVideoFrameFormat::setMaxLuminance(float lum)
{
    // Copy-on-write before mutating shared state.
    detach();
    d->maxLuminance = lum;
}
778 | |
779 | |
780 | /*! |
781 | Returns a video pixel format equivalent to an image \a format. If there is no equivalent |
782 | format QVideoFrameFormat::Format_Invalid is returned instead. |
783 | |
784 | \note In general \l QImage does not handle YUV formats. |
785 | |
786 | */ |
QVideoFrameFormat::PixelFormat QVideoFrameFormat::pixelFormatFromImageFormat(QImage::Format format)
{
    switch (format) {
    // QImage's 32-bit xRGB/ARGB formats store the pixel as a native-endian
    // 32-bit value, so the equivalent byte-ordered video format depends on
    // the host byte order.
#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN
    case QImage::Format_RGB32:
        return QVideoFrameFormat::Format_BGRX8888;
    case QImage::Format_ARGB32:
        return QVideoFrameFormat::Format_BGRA8888;
    case QImage::Format_ARGB32_Premultiplied:
        return QVideoFrameFormat::Format_BGRA8888_Premultiplied;
#else
    case QImage::Format_RGB32:
        return QVideoFrameFormat::Format_XRGB8888;
    case QImage::Format_ARGB32:
        return QVideoFrameFormat::Format_ARGB8888;
    case QImage::Format_ARGB32_Premultiplied:
        return QVideoFrameFormat::Format_ARGB8888_Premultiplied;
#endif
    case QImage::Format_RGBA8888:
        return QVideoFrameFormat::Format_RGBA8888;
    case QImage::Format_RGBA8888_Premultiplied:
        // QVideoFrameFormat::Format_RGBA8888_Premultiplied is to be added in 6.8
        // Format_RGBX8888 suits the best as a workaround
        return QVideoFrameFormat::Format_RGBX8888;
    case QImage::Format_RGBX8888:
        return QVideoFrameFormat::Format_RGBX8888;
    case QImage::Format_Grayscale8:
        return QVideoFrameFormat::Format_Y8;
    case QImage::Format_Grayscale16:
        return QVideoFrameFormat::Format_Y16;
    default:
        // All other QImage formats (indexed, 16-bit, float, YUV-less etc.)
        // have no video-format equivalent.
        return QVideoFrameFormat::Format_Invalid;
    }
}
821 | |
822 | /*! |
823 | Returns an image format equivalent to a video frame pixel \a format. If there is no equivalent |
824 | format QImage::Format_Invalid is returned instead. |
825 | |
826 | \note In general \l QImage does not handle YUV formats. |
827 | |
828 | */ |
QImage::Format QVideoFrameFormat::imageFormatFromPixelFormat(QVideoFrameFormat::PixelFormat format)
{
    switch (format) {
    // Inverse of pixelFormatFromImageFormat(): QImage's 32-bit formats are
    // native-endian, so which byte-ordered video format maps onto them
    // depends on the host byte order; the opposite ordering is unmappable.
#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN
    case QVideoFrameFormat::Format_BGRA8888:
        return QImage::Format_ARGB32;
    case QVideoFrameFormat::Format_BGRA8888_Premultiplied:
        return QImage::Format_ARGB32_Premultiplied;
    case QVideoFrameFormat::Format_BGRX8888:
        return QImage::Format_RGB32;
    case QVideoFrameFormat::Format_ARGB8888:
    case QVideoFrameFormat::Format_ARGB8888_Premultiplied:
    case QVideoFrameFormat::Format_XRGB8888:
        return QImage::Format_Invalid;
#else
    case QVideoFrameFormat::Format_ARGB8888:
        return QImage::Format_ARGB32;
    case QVideoFrameFormat::Format_ARGB8888_Premultiplied:
        return QImage::Format_ARGB32_Premultiplied;
    case QVideoFrameFormat::Format_XRGB8888:
        return QImage::Format_RGB32;
    case QVideoFrameFormat::Format_BGRA8888:
    case QVideoFrameFormat::Format_BGRA8888_Premultiplied:
    case QVideoFrameFormat::Format_BGRX8888:
        return QImage::Format_Invalid;
#endif
    case QVideoFrameFormat::Format_RGBA8888:
        return QImage::Format_RGBA8888;
    case QVideoFrameFormat::Format_RGBX8888:
        return QImage::Format_RGBX8888;
    case QVideoFrameFormat::Format_Y8:
        return QImage::Format_Grayscale8;
    case QVideoFrameFormat::Format_Y16:
        return QImage::Format_Grayscale16;
    // YUV, compressed and sampler formats have no QImage equivalent.
    case QVideoFrameFormat::Format_ABGR8888:
    case QVideoFrameFormat::Format_XBGR8888:
    case QVideoFrameFormat::Format_AYUV:
    case QVideoFrameFormat::Format_AYUV_Premultiplied:
    case QVideoFrameFormat::Format_YUV420P:
    case QVideoFrameFormat::Format_YUV420P10:
    case QVideoFrameFormat::Format_YUV422P:
    case QVideoFrameFormat::Format_YV12:
    case QVideoFrameFormat::Format_UYVY:
    case QVideoFrameFormat::Format_YUYV:
    case QVideoFrameFormat::Format_NV12:
    case QVideoFrameFormat::Format_NV21:
    case QVideoFrameFormat::Format_IMC1:
    case QVideoFrameFormat::Format_IMC2:
    case QVideoFrameFormat::Format_IMC3:
    case QVideoFrameFormat::Format_IMC4:
    case QVideoFrameFormat::Format_P010:
    case QVideoFrameFormat::Format_P016:
    case QVideoFrameFormat::Format_Jpeg:
    case QVideoFrameFormat::Format_Invalid:
    case QVideoFrameFormat::Format_SamplerExternalOES:
    case QVideoFrameFormat::Format_SamplerRect:
        return QImage::Format_Invalid;
    }
    // Unreachable for valid enum values; keeps compilers happy.
    return QImage::Format_Invalid;
}
889 | |
890 | /*! |
891 | Returns a string representation of the given \a pixelFormat. |
892 | */ |
893 | QString QVideoFrameFormat::pixelFormatToString(QVideoFrameFormat::PixelFormat pixelFormat) |
894 | { |
895 | switch (pixelFormat) { |
896 | case QVideoFrameFormat::Format_Invalid: |
897 | return QStringLiteral("Invalid"); |
898 | case QVideoFrameFormat::Format_ARGB8888: |
899 | return QStringLiteral("ARGB8888"); |
900 | case QVideoFrameFormat::Format_ARGB8888_Premultiplied: |
901 | return QStringLiteral("ARGB8888 Premultiplied"); |
902 | case QVideoFrameFormat::Format_XRGB8888: |
903 | return QStringLiteral("XRGB8888"); |
904 | case QVideoFrameFormat::Format_BGRA8888: |
905 | return QStringLiteral("BGRA8888"); |
906 | case QVideoFrameFormat::Format_BGRX8888: |
907 | return QStringLiteral("BGRX8888"); |
908 | case QVideoFrameFormat::Format_BGRA8888_Premultiplied: |
909 | return QStringLiteral("BGRA8888 Premultiplied"); |
910 | case QVideoFrameFormat::Format_RGBA8888: |
911 | return QStringLiteral("RGBA8888"); |
912 | case QVideoFrameFormat::Format_RGBX8888: |
913 | return QStringLiteral("RGBX8888"); |
914 | case QVideoFrameFormat::Format_ABGR8888: |
915 | return QStringLiteral("ABGR8888"); |
916 | case QVideoFrameFormat::Format_XBGR8888: |
917 | return QStringLiteral("XBGR8888"); |
918 | case QVideoFrameFormat::Format_AYUV: |
919 | return QStringLiteral("AYUV"); |
920 | case QVideoFrameFormat::Format_AYUV_Premultiplied: |
921 | return QStringLiteral("AYUV Premultiplied"); |
922 | case QVideoFrameFormat::Format_YUV420P: |
923 | return QStringLiteral("YUV420P"); |
924 | case QVideoFrameFormat::Format_YUV420P10: |
925 | return QStringLiteral("YUV420P10"); |
926 | case QVideoFrameFormat::Format_YUV422P: |
927 | return QStringLiteral("YUV422P"); |
928 | case QVideoFrameFormat::Format_YV12: |
929 | return QStringLiteral("YV12"); |
930 | case QVideoFrameFormat::Format_UYVY: |
931 | return QStringLiteral("UYVY"); |
932 | case QVideoFrameFormat::Format_YUYV: |
933 | return QStringLiteral("YUYV"); |
934 | case QVideoFrameFormat::Format_NV12: |
935 | return QStringLiteral("NV12"); |
936 | case QVideoFrameFormat::Format_NV21: |
937 | return QStringLiteral("NV21"); |
938 | case QVideoFrameFormat::Format_IMC1: |
939 | return QStringLiteral("IMC1"); |
940 | case QVideoFrameFormat::Format_IMC2: |
941 | return QStringLiteral("IMC2"); |
942 | case QVideoFrameFormat::Format_IMC3: |
943 | return QStringLiteral("IMC3"); |
944 | case QVideoFrameFormat::Format_IMC4: |
945 | return QStringLiteral("IMC4"); |
946 | case QVideoFrameFormat::Format_Y8: |
947 | return QStringLiteral("Y8"); |
948 | case QVideoFrameFormat::Format_Y16: |
949 | return QStringLiteral("Y16"); |
950 | case QVideoFrameFormat::Format_P010: |
951 | return QStringLiteral("P010"); |
952 | case QVideoFrameFormat::Format_P016: |
953 | return QStringLiteral("P016"); |
954 | case QVideoFrameFormat::Format_SamplerExternalOES: |
955 | return QStringLiteral("SamplerExternalOES"); |
956 | case QVideoFrameFormat::Format_Jpeg: |
957 | return QStringLiteral("Jpeg"); |
958 | case QVideoFrameFormat::Format_SamplerRect: |
959 | return QStringLiteral("SamplerRect"); |
960 | } |
961 | |
962 | return QStringLiteral(""); |
963 | } |
964 | |
965 | #ifndef QT_NO_DEBUG_STREAM |
966 | # if QT_DEPRECATED_SINCE(6, 4) |
967 | QDebug operator<<(QDebug dbg, QVideoFrameFormat::YCbCrColorSpace cs) |
968 | { |
969 | QDebugStateSaver saver(dbg); |
970 | dbg.nospace(); |
971 | switch (cs) { |
972 | case QVideoFrameFormat::YCbCr_BT601: |
973 | dbg << "YCbCr_BT601"; |
974 | break; |
975 | case QVideoFrameFormat::YCbCr_BT709: |
976 | dbg << "YCbCr_BT709"; |
977 | break; |
978 | case QVideoFrameFormat::YCbCr_JPEG: |
979 | dbg << "YCbCr_JPEG"; |
980 | break; |
981 | case QVideoFrameFormat::YCbCr_xvYCC601: |
982 | dbg << "YCbCr_xvYCC601"; |
983 | break; |
984 | case QVideoFrameFormat::YCbCr_xvYCC709: |
985 | dbg << "YCbCr_xvYCC709"; |
986 | break; |
987 | case QVideoFrameFormat::YCbCr_BT2020: |
988 | dbg << "YCbCr_BT2020"; |
989 | break; |
990 | default: |
991 | dbg << "YCbCr_Undefined"; |
992 | break; |
993 | } |
994 | return dbg; |
995 | } |
996 | # endif // QT_DEPRECATED_SINCE(6, 4) |
997 | |
998 | QDebug operator<<(QDebug dbg, QVideoFrameFormat::ColorSpace cs) |
999 | { |
1000 | QDebugStateSaver saver(dbg); |
1001 | dbg.nospace(); |
1002 | switch (cs) { |
1003 | case QVideoFrameFormat::ColorSpace_BT601: |
1004 | dbg << "ColorSpace_BT601"; |
1005 | break; |
1006 | case QVideoFrameFormat::ColorSpace_BT709: |
1007 | dbg << "ColorSpace_BT709"; |
1008 | break; |
1009 | case QVideoFrameFormat::ColorSpace_AdobeRgb: |
1010 | dbg << "ColorSpace_AdobeRgb"; |
1011 | break; |
1012 | case QVideoFrameFormat::ColorSpace_BT2020: |
1013 | dbg << "ColorSpace_BT2020"; |
1014 | break; |
1015 | default: |
1016 | dbg << "ColorSpace_Undefined"; |
1017 | break; |
1018 | } |
1019 | return dbg; |
1020 | } |
1021 | |
1022 | QDebug operator<<(QDebug dbg, QVideoFrameFormat::Direction dir) |
1023 | { |
1024 | QDebugStateSaver saver(dbg); |
1025 | dbg.nospace(); |
1026 | switch (dir) { |
1027 | case QVideoFrameFormat::BottomToTop: |
1028 | dbg << "BottomToTop"; |
1029 | break; |
1030 | case QVideoFrameFormat::TopToBottom: |
1031 | dbg << "TopToBottom"; |
1032 | break; |
1033 | } |
1034 | return dbg; |
1035 | } |
1036 | |
1037 | QDebug operator<<(QDebug dbg, const QVideoFrameFormat &f) |
1038 | { |
1039 | QDebugStateSaver saver(dbg); |
1040 | dbg.nospace(); |
1041 | dbg << "QVideoFrameFormat("<< f.pixelFormat() << ", "<< f.frameSize() |
1042 | << ", viewport="<< f.viewport() |
1043 | << ", colorSpace="<< f.colorSpace() |
1044 | << ')' |
1045 | << "\n pixel format="<< f.pixelFormat() |
1046 | << "\n frame size="<< f.frameSize() |
1047 | << "\n viewport="<< f.viewport() |
1048 | << "\n colorSpace="<< f.colorSpace() |
1049 | << "\n frameRate="<< f.streamFrameRate() |
1050 | << "\n mirrored="<< f.isMirrored(); |
1051 | |
1052 | return dbg; |
1053 | } |
1054 | |
1055 | QDebug operator<<(QDebug dbg, QVideoFrameFormat::PixelFormat pf) |
1056 | { |
1057 | QDebugStateSaver saver(dbg); |
1058 | dbg.nospace(); |
1059 | |
1060 | auto format = QVideoFrameFormat::pixelFormatToString(pixelFormat: pf); |
1061 | if (format.isEmpty()) |
1062 | return dbg; |
1063 | |
1064 | dbg.noquote() << QStringLiteral("Format_") << format; |
1065 | return dbg; |
1066 | } |
1067 | #endif |
1068 | |
1069 | QT_END_NAMESPACE |
1070 |
Definitions
- initResource
- QVideoFrameFormatPrivate
- QVideoFrameFormatPrivate
- QVideoFrameFormatPrivate
- operator ==
- frameRatesEqual
- QVideoFrameFormat
- QVideoFrameFormat
- QVideoFrameFormat
- operator =
- ~QVideoFrameFormat
- isValid
- operator ==
- operator !=
- detach
- pixelFormat
- frameSize
- frameWidth
- frameHeight
- planeCount
- setFrameSize
- setFrameSize
- viewport
- setViewport
- scanLineDirection
- setScanLineDirection
- frameRate
- setFrameRate
- streamFrameRate
- setStreamFrameRate
- yCbCrColorSpace
- setYCbCrColorSpace
- colorSpace
- setColorSpace
- colorTransfer
- setColorTransfer
- colorRange
- setColorRange
- isMirrored
- setMirrored
- rotation
- setRotation
- vertexShaderFileName
- fragmentShaderFileName
- updateUniformData
- maxLuminance
- setMaxLuminance
- pixelFormatFromImageFormat
- imageFormatFromPixelFormat
- pixelFormatToString
- operator<<
- operator<<
- operator<<
- operator<<
Learn to use CMake with our Intro Training
Find out more