// Copyright (C) 2021 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#include "qffmpeg_p.h"

#include <qdebug.h>
#include <qloggingcategory.h>

extern "C" {
#include <libavutil/pixdesc.h>
#include <libavutil/samplefmt.h>

#ifdef Q_OS_DARWIN
#include <libavutil/hwcontext_videotoolbox.h>
#endif
}

QT_BEGIN_NAMESPACE

static Q_LOGGING_CATEGORY(qLcFFmpegUtils, "qt.multimedia.ffmpeg.utils");

namespace QFFmpeg {

24 | bool isAVFormatSupported(const Codec &codec, PixelOrSampleFormat format) |
25 | { |
26 | if (codec.type() == AVMEDIA_TYPE_VIDEO) { |
27 | auto checkFormat = [format](AVPixelFormat f) { return f == format; }; |
28 | return findAVPixelFormat(codec, predicate: checkFormat).has_value(); |
29 | } |
30 | |
31 | if (codec.type() == AVMEDIA_TYPE_AUDIO) { |
32 | const auto sampleFormats = codec.sampleFormats(); |
33 | return hasValue(range: sampleFormats, value: AVSampleFormat(format)); |
34 | } |
35 | |
36 | return false; |
37 | } |
38 | |
39 | bool isHwPixelFormat(AVPixelFormat format) |
40 | { |
41 | const auto desc = av_pix_fmt_desc_get(pix_fmt: format); |
42 | return desc && (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) != 0; |
43 | } |
44 | |
45 | void applyExperimentalCodecOptions(const Codec &codec, AVDictionary **opts) |
46 | { |
47 | if (codec.isExperimental()) { |
48 | qCWarning(qLcFFmpegUtils) << "Applying the option 'strict -2' for the experimental codec" |
49 | << codec.name() << ". it's unlikely to work properly"; |
50 | av_dict_set(pm: opts, key: "strict", value: "-2", flags: 0); |
51 | } |
52 | } |
53 | |
54 | AVPixelFormat pixelFormatForHwDevice(AVHWDeviceType deviceType) |
55 | { |
56 | switch (deviceType) { |
57 | case AV_HWDEVICE_TYPE_VIDEOTOOLBOX: |
58 | return AV_PIX_FMT_VIDEOTOOLBOX; |
59 | case AV_HWDEVICE_TYPE_VAAPI: |
60 | return AV_PIX_FMT_VAAPI; |
61 | case AV_HWDEVICE_TYPE_MEDIACODEC: |
62 | return AV_PIX_FMT_MEDIACODEC; |
63 | case AV_HWDEVICE_TYPE_CUDA: |
64 | return AV_PIX_FMT_CUDA; |
65 | case AV_HWDEVICE_TYPE_VDPAU: |
66 | return AV_PIX_FMT_VDPAU; |
67 | case AV_HWDEVICE_TYPE_OPENCL: |
68 | return AV_PIX_FMT_OPENCL; |
69 | case AV_HWDEVICE_TYPE_QSV: |
70 | return AV_PIX_FMT_QSV; |
71 | case AV_HWDEVICE_TYPE_D3D11VA: |
72 | return AV_PIX_FMT_D3D11; |
73 | #if QT_FFMPEG_HAS_D3D12VA |
74 | case AV_HWDEVICE_TYPE_D3D12VA: |
75 | return AV_PIX_FMT_D3D12; |
76 | #endif |
77 | case AV_HWDEVICE_TYPE_DXVA2: |
78 | return AV_PIX_FMT_DXVA2_VLD; |
79 | case AV_HWDEVICE_TYPE_DRM: |
80 | return AV_PIX_FMT_DRM_PRIME; |
81 | #if QT_FFMPEG_HAS_VULKAN |
82 | case AV_HWDEVICE_TYPE_VULKAN: |
83 | return AV_PIX_FMT_VULKAN; |
84 | #endif |
85 | default: |
86 | return AV_PIX_FMT_NONE; |
87 | } |
88 | } |
89 | |
90 | AVPacketSideData *addStreamSideData(AVStream *stream, AVPacketSideData sideData) |
91 | { |
92 | QScopeGuard freeData([&sideData]() { av_free(ptr: sideData.data); }); |
93 | #if QT_FFMPEG_STREAM_SIDE_DATA_DEPRECATED |
94 | AVPacketSideData *result = av_packet_side_data_add( |
95 | &stream->codecpar->coded_side_data, |
96 | &stream->codecpar->nb_coded_side_data, |
97 | sideData.type, |
98 | sideData.data, |
99 | sideData.size, |
100 | 0); |
101 | if (result) { |
102 | // If the result is not null, the ownership is taken by AVStream, |
103 | // otherwise the data must be deleted. |
104 | freeData.dismiss(); |
105 | return result; |
106 | } |
107 | #else |
108 | Q_UNUSED(stream); |
109 | // TODO: implement for older FFmpeg versions |
110 | qWarning() << "Adding stream side data is not supported for FFmpeg < 6.1"; |
111 | #endif |
112 | |
113 | return nullptr; |
114 | } |
115 | |
116 | const AVPacketSideData *streamSideData(const AVStream *stream, AVPacketSideDataType type) |
117 | { |
118 | Q_ASSERT(stream); |
119 | |
120 | #if QT_FFMPEG_STREAM_SIDE_DATA_DEPRECATED |
121 | return av_packet_side_data_get(stream->codecpar->coded_side_data, |
122 | stream->codecpar->nb_coded_side_data, type); |
123 | #else |
124 | auto checkType = [type](const auto &item) { return item.type == type; }; |
125 | const auto end = stream->side_data + stream->nb_side_data; |
126 | const auto found = std::find_if(first: stream->side_data, last: end, pred: checkType); |
127 | return found == end ? nullptr : found; |
128 | #endif |
129 | } |
130 | |
131 | SwrContextUPtr createResampleContext(const AVAudioFormat &inputFormat, |
132 | const AVAudioFormat &outputFormat) |
133 | { |
134 | SwrContext *resampler = nullptr; |
135 | #if QT_FFMPEG_HAS_AV_CHANNEL_LAYOUT |
136 | |
137 | #if QT_FFMPEG_SWR_CONST_CH_LAYOUT |
138 | using AVChannelLayoutPrm = const AVChannelLayout*; |
139 | #else |
140 | using AVChannelLayoutPrm = AVChannelLayout*; |
141 | #endif |
142 | |
143 | swr_alloc_set_opts2(&resampler, |
144 | const_cast<AVChannelLayoutPrm>(&outputFormat.channelLayout), |
145 | outputFormat.sampleFormat, |
146 | outputFormat.sampleRate, |
147 | const_cast<AVChannelLayoutPrm>(&inputFormat.channelLayout), |
148 | inputFormat.sampleFormat, |
149 | inputFormat.sampleRate, |
150 | 0, |
151 | nullptr); |
152 | |
153 | #else |
154 | |
155 | resampler = swr_alloc_set_opts(s: nullptr, |
156 | out_ch_layout: outputFormat.channelLayoutMask, |
157 | out_sample_fmt: outputFormat.sampleFormat, |
158 | out_sample_rate: outputFormat.sampleRate, |
159 | in_ch_layout: inputFormat.channelLayoutMask, |
160 | in_sample_fmt: inputFormat.sampleFormat, |
161 | in_sample_rate: inputFormat.sampleRate, |
162 | log_offset: 0, |
163 | log_ctx: nullptr); |
164 | |
165 | #endif |
166 | |
167 | swr_init(s: resampler); |
168 | return SwrContextUPtr(resampler); |
169 | } |
170 | |
171 | QVideoFrameFormat::ColorTransfer fromAvColorTransfer(AVColorTransferCharacteristic colorTrc) { |
172 | switch (colorTrc) { |
173 | case AVCOL_TRC_BT709: |
174 | // The following three cases have transfer characteristics identical to BT709 |
175 | case AVCOL_TRC_BT1361_ECG: |
176 | case AVCOL_TRC_BT2020_10: |
177 | case AVCOL_TRC_BT2020_12: |
178 | case AVCOL_TRC_SMPTE240M: // almost identical to bt709 |
179 | return QVideoFrameFormat::ColorTransfer_BT709; |
180 | case AVCOL_TRC_GAMMA22: |
181 | case AVCOL_TRC_SMPTE428: // No idea, let's hope for the best... |
182 | case AVCOL_TRC_IEC61966_2_1: // sRGB, close enough to 2.2... |
183 | case AVCOL_TRC_IEC61966_2_4: // not quite, but probably close enough |
184 | return QVideoFrameFormat::ColorTransfer_Gamma22; |
185 | case AVCOL_TRC_GAMMA28: |
186 | return QVideoFrameFormat::ColorTransfer_Gamma28; |
187 | case AVCOL_TRC_SMPTE170M: |
188 | return QVideoFrameFormat::ColorTransfer_BT601; |
189 | case AVCOL_TRC_LINEAR: |
190 | return QVideoFrameFormat::ColorTransfer_Linear; |
191 | case AVCOL_TRC_SMPTE2084: |
192 | return QVideoFrameFormat::ColorTransfer_ST2084; |
193 | case AVCOL_TRC_ARIB_STD_B67: |
194 | return QVideoFrameFormat::ColorTransfer_STD_B67; |
195 | default: |
196 | break; |
197 | } |
198 | return QVideoFrameFormat::ColorTransfer_Unknown; |
199 | } |
200 | |
201 | AVColorTransferCharacteristic toAvColorTransfer(QVideoFrameFormat::ColorTransfer colorTrc) |
202 | { |
203 | switch (colorTrc) { |
204 | case QVideoFrameFormat::ColorTransfer_BT709: |
205 | return AVCOL_TRC_BT709; |
206 | case QVideoFrameFormat::ColorTransfer_BT601: |
207 | return AVCOL_TRC_BT709; // which one is the best? |
208 | case QVideoFrameFormat::ColorTransfer_Linear: |
209 | return AVCOL_TRC_SMPTE2084; |
210 | case QVideoFrameFormat::ColorTransfer_Gamma22: |
211 | return AVCOL_TRC_GAMMA22; |
212 | case QVideoFrameFormat::ColorTransfer_Gamma28: |
213 | return AVCOL_TRC_GAMMA28; |
214 | case QVideoFrameFormat::ColorTransfer_ST2084: |
215 | return AVCOL_TRC_SMPTE2084; |
216 | case QVideoFrameFormat::ColorTransfer_STD_B67: |
217 | return AVCOL_TRC_ARIB_STD_B67; |
218 | default: |
219 | return AVCOL_TRC_UNSPECIFIED; |
220 | } |
221 | } |
222 | |
223 | QVideoFrameFormat::ColorSpace fromAvColorSpace(AVColorSpace colorSpace) |
224 | { |
225 | switch (colorSpace) { |
226 | default: |
227 | case AVCOL_SPC_UNSPECIFIED: |
228 | case AVCOL_SPC_RESERVED: |
229 | case AVCOL_SPC_FCC: |
230 | case AVCOL_SPC_SMPTE240M: |
231 | case AVCOL_SPC_YCGCO: |
232 | case AVCOL_SPC_SMPTE2085: |
233 | case AVCOL_SPC_CHROMA_DERIVED_NCL: |
234 | case AVCOL_SPC_CHROMA_DERIVED_CL: |
235 | case AVCOL_SPC_ICTCP: // BT.2100 ICtCp |
236 | return QVideoFrameFormat::ColorSpace_Undefined; |
237 | case AVCOL_SPC_RGB: |
238 | return QVideoFrameFormat::ColorSpace_AdobeRgb; |
239 | case AVCOL_SPC_BT709: |
240 | return QVideoFrameFormat::ColorSpace_BT709; |
241 | case AVCOL_SPC_BT470BG: // BT601 |
242 | case AVCOL_SPC_SMPTE170M: // Also BT601 |
243 | return QVideoFrameFormat::ColorSpace_BT601; |
244 | case AVCOL_SPC_BT2020_NCL: // Non constant luminence |
245 | case AVCOL_SPC_BT2020_CL: // Constant luminence |
246 | return QVideoFrameFormat::ColorSpace_BT2020; |
247 | } |
248 | } |
249 | |
250 | AVColorSpace toAvColorSpace(QVideoFrameFormat::ColorSpace colorSpace) |
251 | { |
252 | switch (colorSpace) { |
253 | case QVideoFrameFormat::ColorSpace_BT601: |
254 | return AVCOL_SPC_BT470BG; |
255 | case QVideoFrameFormat::ColorSpace_BT709: |
256 | return AVCOL_SPC_BT709; |
257 | case QVideoFrameFormat::ColorSpace_AdobeRgb: |
258 | return AVCOL_SPC_RGB; |
259 | case QVideoFrameFormat::ColorSpace_BT2020: |
260 | return AVCOL_SPC_BT2020_CL; |
261 | default: |
262 | return AVCOL_SPC_UNSPECIFIED; |
263 | } |
264 | } |
265 | |
266 | QVideoFrameFormat::ColorRange fromAvColorRange(AVColorRange colorRange) |
267 | { |
268 | switch (colorRange) { |
269 | case AVCOL_RANGE_MPEG: |
270 | return QVideoFrameFormat::ColorRange_Video; |
271 | case AVCOL_RANGE_JPEG: |
272 | return QVideoFrameFormat::ColorRange_Full; |
273 | default: |
274 | return QVideoFrameFormat::ColorRange_Unknown; |
275 | } |
276 | } |
277 | |
278 | AVColorRange toAvColorRange(QVideoFrameFormat::ColorRange colorRange) |
279 | { |
280 | switch (colorRange) { |
281 | case QVideoFrameFormat::ColorRange_Video: |
282 | return AVCOL_RANGE_MPEG; |
283 | case QVideoFrameFormat::ColorRange_Full: |
284 | return AVCOL_RANGE_JPEG; |
285 | default: |
286 | return AVCOL_RANGE_UNSPECIFIED; |
287 | } |
288 | } |
289 | |
290 | AVHWDeviceContext* avFrameDeviceContext(const AVFrame* frame) { |
291 | if (!frame) |
292 | return {}; |
293 | if (!frame->hw_frames_ctx) |
294 | return {}; |
295 | |
296 | const auto *frameCtx = reinterpret_cast<AVHWFramesContext *>(frame->hw_frames_ctx->data); |
297 | if (!frameCtx) |
298 | return {}; |
299 | |
300 | return frameCtx->device_ctx; |
301 | } |
302 | |
303 | SwsContextUPtr createSwsContext(const QSize &srcSize, AVPixelFormat srcPixFmt, const QSize &dstSize, |
304 | AVPixelFormat dstPixFmt, int conversionType) |
305 | { |
306 | |
307 | SwsContext *result = |
308 | sws_getContext(srcW: srcSize.width(), srcH: srcSize.height(), srcFormat: srcPixFmt, dstW: dstSize.width(), |
309 | dstH: dstSize.height(), dstFormat: dstPixFmt, flags: conversionType, srcFilter: nullptr, dstFilter: nullptr, param: nullptr); |
310 | |
311 | if (!result) |
312 | qCWarning(qLcFFmpegUtils) << "Cannot create sws context for:\n" |
313 | << "srcSize:"<< srcSize |
314 | << "srcPixFmt:"<< srcPixFmt |
315 | << "dstSize:"<< dstSize |
316 | << "dstPixFmt:"<< dstPixFmt |
317 | << "conversionType:"<< conversionType; |
318 | |
319 | return SwsContextUPtr(result); |
320 | } |

#ifdef Q_OS_DARWIN
323 | bool isCVFormatSupported(uint32_t cvFormat) |
324 | { |
325 | return av_map_videotoolbox_format_to_pixfmt(cvFormat) != AV_PIX_FMT_NONE; |
326 | } |
327 | |
// Renders a CoreVideo FourCC code as a 4-character string by reversing its
// in-memory byte order; on little-endian hosts this yields the FourCC
// characters in their usual display order (e.g. 0x32767579 -> "2vuy").
std::string cvFormatToString(uint32_t cvFormat)
{
    const char *bytes = reinterpret_cast<const char *>(&cvFormat);
    return std::string{ bytes[3], bytes[2], bytes[1], bytes[0] };
}

#endif

} // namespace QFFmpeg

338 | QDebug operator<<(QDebug dbg, const AVRational &value) |
339 | { |
340 | dbg << value.num << "/"<< value.den; |
341 | return dbg; |
342 | } |

#if QT_FFMPEG_HAS_AV_CHANNEL_LAYOUT
345 | QDebug operator<<(QDebug dbg, const AVChannelLayout &layout) |
346 | { |
347 | dbg << '['; |
348 | dbg << "nb_channels:"<< layout.nb_channels; |
349 | dbg << ", order:"<< layout.order; |
350 | |
351 | if (layout.order == AV_CHANNEL_ORDER_NATIVE || layout.order == AV_CHANNEL_ORDER_AMBISONIC) |
352 | dbg << ", mask:"<< Qt::bin << layout.u.mask << Qt::dec; |
353 | else if (layout.order == AV_CHANNEL_ORDER_CUSTOM && layout.u.map) |
354 | dbg << ", id: "<< layout.u.map->id; |
355 | |
356 | dbg << ']'; |
357 | |
358 | return dbg; |
359 | } |
#endif

#if QT_FFMPEG_HAS_AVCODEC_GET_SUPPORTED_CONFIG
363 | QDebug operator<<(QDebug dbg, const AVCodecConfig value) |
364 | { |
365 | switch (value) { |
366 | case AV_CODEC_CONFIG_CHANNEL_LAYOUT: |
367 | dbg << "AV_CODEC_CONFIG_CHANNEL_LAYOUT"; |
368 | break; |
369 | case AV_CODEC_CONFIG_COLOR_RANGE: |
370 | dbg << "AV_CODEC_CONFIG_COLOR_RANGE"; |
371 | break; |
372 | case AV_CODEC_CONFIG_COLOR_SPACE: |
373 | dbg << "AV_CODEC_CONFIG_COLOR_SPACE"; |
374 | break; |
375 | case AV_CODEC_CONFIG_FRAME_RATE: |
376 | dbg << "AV_CODEC_CONFIG_FRAME_RATE"; |
377 | break; |
378 | case AV_CODEC_CONFIG_PIX_FORMAT: |
379 | dbg << "AV_CODEC_CONFIG_PIX_FORMAT"; |
380 | break; |
381 | case AV_CODEC_CONFIG_SAMPLE_FORMAT: |
382 | dbg << "AV_CODEC_CONFIG_SAMPLE_FORMAT"; |
383 | break; |
384 | case AV_CODEC_CONFIG_SAMPLE_RATE: |
385 | dbg << "AV_CODEC_CONFIG_SAMPLE_RATE"; |
386 | break; |
387 | default: |
388 | dbg << "<UNKNOWN_CODEC_CONFIG>"; |
389 | break; |
390 | } |
391 | |
392 | return dbg; |
393 | } |
#endif

QT_END_NAMESPACE