1 | // Copyright (C) 2021 The Qt Company Ltd. |
2 | // SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only |
3 | |
4 | #include "qffmpeg_p.h" |
5 | |
6 | #include <qdebug.h> |
7 | #include <qloggingcategory.h> |
8 | |
9 | extern "C" { |
10 | #include <libavutil/pixdesc.h> |
11 | #include <libavutil/samplefmt.h> |
12 | |
13 | #ifdef Q_OS_DARWIN |
14 | #include <libavutil/hwcontext_videotoolbox.h> |
15 | #endif |
16 | } |
17 | |
18 | QT_BEGIN_NAMESPACE |
19 | |
// Logging category shared by the FFmpeg utility helpers in this file.
static Q_LOGGING_CATEGORY(qLcFFmpegUtils, "qt.multimedia.ffmpeg.utils" );
21 | |
22 | namespace QFFmpeg { |
23 | |
24 | bool isAVFormatSupported(const AVCodec *codec, PixelOrSampleFormat format) |
25 | { |
26 | if (codec->type == AVMEDIA_TYPE_VIDEO) { |
27 | auto checkFormat = [format](AVPixelFormat f) { return f == format; }; |
28 | return findAVPixelFormat(codec, predicate: checkFormat) != AV_PIX_FMT_NONE; |
29 | } |
30 | |
31 | if (codec->type == AVMEDIA_TYPE_AUDIO) { |
32 | const auto sampleFormats = getCodecSampleFormats(codec); |
33 | return hasAVValue(fmts: sampleFormats, format: AVSampleFormat(format)); |
34 | } |
35 | |
36 | return false; |
37 | } |
38 | |
39 | bool isHwPixelFormat(AVPixelFormat format) |
40 | { |
41 | const auto desc = av_pix_fmt_desc_get(pix_fmt: format); |
42 | return desc && (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) != 0; |
43 | } |
44 | |
45 | bool isAVCodecExperimental(const AVCodec *codec) |
46 | { |
47 | return (codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) != 0; |
48 | } |
49 | |
50 | void applyExperimentalCodecOptions(const AVCodec *codec, AVDictionary** opts) |
51 | { |
52 | if (isAVCodecExperimental(codec)) { |
53 | qCWarning(qLcFFmpegUtils) << "Applying the option 'strict -2' for the experimental codec" |
54 | << codec->name << ". it's unlikely to work properly" ; |
55 | av_dict_set(pm: opts, key: "strict" , value: "-2" , flags: 0); |
56 | } |
57 | } |
58 | |
// Maps a hardware device type to the AVPixelFormat its frames use
// (e.g. VideoToolbox frames carry AV_PIX_FMT_VIDEOTOOLBOX).
// Returns AV_PIX_FMT_NONE for device types without a known mapping.
// D3D12 and Vulkan entries are guarded because the corresponding
// enumerators only exist in sufficiently new FFmpeg versions.
AVPixelFormat pixelFormatForHwDevice(AVHWDeviceType deviceType)
{
    switch (deviceType) {
    case AV_HWDEVICE_TYPE_VIDEOTOOLBOX:
        return AV_PIX_FMT_VIDEOTOOLBOX;
    case AV_HWDEVICE_TYPE_VAAPI:
        return AV_PIX_FMT_VAAPI;
    case AV_HWDEVICE_TYPE_MEDIACODEC:
        return AV_PIX_FMT_MEDIACODEC;
    case AV_HWDEVICE_TYPE_CUDA:
        return AV_PIX_FMT_CUDA;
    case AV_HWDEVICE_TYPE_VDPAU:
        return AV_PIX_FMT_VDPAU;
    case AV_HWDEVICE_TYPE_OPENCL:
        return AV_PIX_FMT_OPENCL;
    case AV_HWDEVICE_TYPE_QSV:
        return AV_PIX_FMT_QSV;
    case AV_HWDEVICE_TYPE_D3D11VA:
        return AV_PIX_FMT_D3D11;
#if QT_FFMPEG_HAS_D3D12VA
    case AV_HWDEVICE_TYPE_D3D12VA:
        return AV_PIX_FMT_D3D12;
#endif
    case AV_HWDEVICE_TYPE_DXVA2:
        return AV_PIX_FMT_DXVA2_VLD;
    case AV_HWDEVICE_TYPE_DRM:
        return AV_PIX_FMT_DRM_PRIME;
#if QT_FFMPEG_HAS_VULKAN
    case AV_HWDEVICE_TYPE_VULKAN:
        return AV_PIX_FMT_VULKAN;
#endif
    default:
        return AV_PIX_FMT_NONE;
    }
}
94 | |
95 | AVPacketSideData *addStreamSideData(AVStream *stream, AVPacketSideData sideData) |
96 | { |
97 | QScopeGuard freeData([&sideData]() { av_free(ptr: sideData.data); }); |
98 | #if QT_FFMPEG_STREAM_SIDE_DATA_DEPRECATED |
99 | AVPacketSideData *result = av_packet_side_data_add( |
100 | &stream->codecpar->coded_side_data, |
101 | &stream->codecpar->nb_coded_side_data, |
102 | sideData.type, |
103 | sideData.data, |
104 | sideData.size, |
105 | 0); |
106 | if (result) { |
107 | // If the result is not null, the ownership is taken by AVStream, |
108 | // otherwise the data must be deleted. |
109 | freeData.dismiss(); |
110 | return result; |
111 | } |
112 | #else |
113 | Q_UNUSED(stream); |
114 | // TODO: implement for older FFmpeg versions |
115 | qWarning() << "Adding stream side data is not supported for FFmpeg < 6.1" ; |
116 | #endif |
117 | |
118 | return nullptr; |
119 | } |
120 | |
121 | const AVPacketSideData *streamSideData(const AVStream *stream, AVPacketSideDataType type) |
122 | { |
123 | Q_ASSERT(stream); |
124 | |
125 | #if QT_FFMPEG_STREAM_SIDE_DATA_DEPRECATED |
126 | return av_packet_side_data_get(stream->codecpar->coded_side_data, |
127 | stream->codecpar->nb_coded_side_data, type); |
128 | #else |
129 | auto checkType = [type](const auto &item) { return item.type == type; }; |
130 | const auto end = stream->side_data + stream->nb_side_data; |
131 | const auto found = std::find_if(first: stream->side_data, last: end, pred: checkType); |
132 | return found == end ? nullptr : found; |
133 | #endif |
134 | } |
135 | |
136 | SwrContextUPtr createResampleContext(const AVAudioFormat &inputFormat, |
137 | const AVAudioFormat &outputFormat) |
138 | { |
139 | SwrContext *resampler = nullptr; |
140 | #if QT_FFMPEG_HAS_AV_CHANNEL_LAYOUT |
141 | |
142 | #if QT_FFMPEG_SWR_CONST_CH_LAYOUT |
143 | using AVChannelLayoutPrm = const AVChannelLayout*; |
144 | #else |
145 | using AVChannelLayoutPrm = AVChannelLayout*; |
146 | #endif |
147 | |
148 | swr_alloc_set_opts2(&resampler, |
149 | const_cast<AVChannelLayoutPrm>(&outputFormat.channelLayout), |
150 | outputFormat.sampleFormat, |
151 | outputFormat.sampleRate, |
152 | const_cast<AVChannelLayoutPrm>(&inputFormat.channelLayout), |
153 | inputFormat.sampleFormat, |
154 | inputFormat.sampleRate, |
155 | 0, |
156 | nullptr); |
157 | |
158 | #else |
159 | |
160 | resampler = swr_alloc_set_opts(s: nullptr, |
161 | out_ch_layout: outputFormat.channelLayoutMask, |
162 | out_sample_fmt: outputFormat.sampleFormat, |
163 | out_sample_rate: outputFormat.sampleRate, |
164 | in_ch_layout: inputFormat.channelLayoutMask, |
165 | in_sample_fmt: inputFormat.sampleFormat, |
166 | in_sample_rate: inputFormat.sampleRate, |
167 | log_offset: 0, |
168 | log_ctx: nullptr); |
169 | |
170 | #endif |
171 | |
172 | swr_init(s: resampler); |
173 | return SwrContextUPtr(resampler); |
174 | } |
175 | |
176 | QVideoFrameFormat::ColorTransfer fromAvColorTransfer(AVColorTransferCharacteristic colorTrc) { |
177 | switch (colorTrc) { |
178 | case AVCOL_TRC_BT709: |
179 | // The following three cases have transfer characteristics identical to BT709 |
180 | case AVCOL_TRC_BT1361_ECG: |
181 | case AVCOL_TRC_BT2020_10: |
182 | case AVCOL_TRC_BT2020_12: |
183 | case AVCOL_TRC_SMPTE240M: // almost identical to bt709 |
184 | return QVideoFrameFormat::ColorTransfer_BT709; |
185 | case AVCOL_TRC_GAMMA22: |
186 | case AVCOL_TRC_SMPTE428: // No idea, let's hope for the best... |
187 | case AVCOL_TRC_IEC61966_2_1: // sRGB, close enough to 2.2... |
188 | case AVCOL_TRC_IEC61966_2_4: // not quite, but probably close enough |
189 | return QVideoFrameFormat::ColorTransfer_Gamma22; |
190 | case AVCOL_TRC_GAMMA28: |
191 | return QVideoFrameFormat::ColorTransfer_Gamma28; |
192 | case AVCOL_TRC_SMPTE170M: |
193 | return QVideoFrameFormat::ColorTransfer_BT601; |
194 | case AVCOL_TRC_LINEAR: |
195 | return QVideoFrameFormat::ColorTransfer_Linear; |
196 | case AVCOL_TRC_SMPTE2084: |
197 | return QVideoFrameFormat::ColorTransfer_ST2084; |
198 | case AVCOL_TRC_ARIB_STD_B67: |
199 | return QVideoFrameFormat::ColorTransfer_STD_B67; |
200 | default: |
201 | break; |
202 | } |
203 | return QVideoFrameFormat::ColorTransfer_Unknown; |
204 | } |
205 | |
206 | AVColorTransferCharacteristic toAvColorTransfer(QVideoFrameFormat::ColorTransfer colorTrc) |
207 | { |
208 | switch (colorTrc) { |
209 | case QVideoFrameFormat::ColorTransfer_BT709: |
210 | return AVCOL_TRC_BT709; |
211 | case QVideoFrameFormat::ColorTransfer_BT601: |
212 | return AVCOL_TRC_BT709; // which one is the best? |
213 | case QVideoFrameFormat::ColorTransfer_Linear: |
214 | return AVCOL_TRC_SMPTE2084; |
215 | case QVideoFrameFormat::ColorTransfer_Gamma22: |
216 | return AVCOL_TRC_GAMMA22; |
217 | case QVideoFrameFormat::ColorTransfer_Gamma28: |
218 | return AVCOL_TRC_GAMMA28; |
219 | case QVideoFrameFormat::ColorTransfer_ST2084: |
220 | return AVCOL_TRC_SMPTE2084; |
221 | case QVideoFrameFormat::ColorTransfer_STD_B67: |
222 | return AVCOL_TRC_ARIB_STD_B67; |
223 | default: |
224 | return AVCOL_TRC_UNSPECIFIED; |
225 | } |
226 | } |
227 | |
228 | QVideoFrameFormat::ColorSpace fromAvColorSpace(AVColorSpace colorSpace) |
229 | { |
230 | switch (colorSpace) { |
231 | default: |
232 | case AVCOL_SPC_UNSPECIFIED: |
233 | case AVCOL_SPC_RESERVED: |
234 | case AVCOL_SPC_FCC: |
235 | case AVCOL_SPC_SMPTE240M: |
236 | case AVCOL_SPC_YCGCO: |
237 | case AVCOL_SPC_SMPTE2085: |
238 | case AVCOL_SPC_CHROMA_DERIVED_NCL: |
239 | case AVCOL_SPC_CHROMA_DERIVED_CL: |
240 | case AVCOL_SPC_ICTCP: // BT.2100 ICtCp |
241 | return QVideoFrameFormat::ColorSpace_Undefined; |
242 | case AVCOL_SPC_RGB: |
243 | return QVideoFrameFormat::ColorSpace_AdobeRgb; |
244 | case AVCOL_SPC_BT709: |
245 | return QVideoFrameFormat::ColorSpace_BT709; |
246 | case AVCOL_SPC_BT470BG: // BT601 |
247 | case AVCOL_SPC_SMPTE170M: // Also BT601 |
248 | return QVideoFrameFormat::ColorSpace_BT601; |
249 | case AVCOL_SPC_BT2020_NCL: // Non constant luminence |
250 | case AVCOL_SPC_BT2020_CL: // Constant luminence |
251 | return QVideoFrameFormat::ColorSpace_BT2020; |
252 | } |
253 | } |
254 | |
255 | AVColorSpace toAvColorSpace(QVideoFrameFormat::ColorSpace colorSpace) |
256 | { |
257 | switch (colorSpace) { |
258 | case QVideoFrameFormat::ColorSpace_BT601: |
259 | return AVCOL_SPC_BT470BG; |
260 | case QVideoFrameFormat::ColorSpace_BT709: |
261 | return AVCOL_SPC_BT709; |
262 | case QVideoFrameFormat::ColorSpace_AdobeRgb: |
263 | return AVCOL_SPC_RGB; |
264 | case QVideoFrameFormat::ColorSpace_BT2020: |
265 | return AVCOL_SPC_BT2020_CL; |
266 | default: |
267 | return AVCOL_SPC_UNSPECIFIED; |
268 | } |
269 | } |
270 | |
271 | QVideoFrameFormat::ColorRange fromAvColorRange(AVColorRange colorRange) |
272 | { |
273 | switch (colorRange) { |
274 | case AVCOL_RANGE_MPEG: |
275 | return QVideoFrameFormat::ColorRange_Video; |
276 | case AVCOL_RANGE_JPEG: |
277 | return QVideoFrameFormat::ColorRange_Full; |
278 | default: |
279 | return QVideoFrameFormat::ColorRange_Unknown; |
280 | } |
281 | } |
282 | |
283 | AVColorRange toAvColorRange(QVideoFrameFormat::ColorRange colorRange) |
284 | { |
285 | switch (colorRange) { |
286 | case QVideoFrameFormat::ColorRange_Video: |
287 | return AVCOL_RANGE_MPEG; |
288 | case QVideoFrameFormat::ColorRange_Full: |
289 | return AVCOL_RANGE_JPEG; |
290 | default: |
291 | return AVCOL_RANGE_UNSPECIFIED; |
292 | } |
293 | } |
294 | |
295 | AVHWDeviceContext* avFrameDeviceContext(const AVFrame* frame) { |
296 | if (!frame) |
297 | return {}; |
298 | if (!frame->hw_frames_ctx) |
299 | return {}; |
300 | |
301 | const auto *frameCtx = reinterpret_cast<AVHWFramesContext *>(frame->hw_frames_ctx->data); |
302 | if (!frameCtx) |
303 | return {}; |
304 | |
305 | return frameCtx->device_ctx; |
306 | } |
307 | |
308 | SwsContextUPtr createSwsContext(const QSize &srcSize, AVPixelFormat srcPixFmt, const QSize &dstSize, |
309 | AVPixelFormat dstPixFmt, int conversionType) |
310 | { |
311 | |
312 | SwsContext *result = |
313 | sws_getContext(srcW: srcSize.width(), srcH: srcSize.height(), srcFormat: srcPixFmt, dstW: dstSize.width(), |
314 | dstH: dstSize.height(), dstFormat: dstPixFmt, flags: conversionType, srcFilter: nullptr, dstFilter: nullptr, param: nullptr); |
315 | |
316 | if (!result) |
317 | qCWarning(qLcFFmpegUtils) << "Cannot create sws context for:\n" |
318 | << "srcSize:" << srcSize |
319 | << "srcPixFmt:" << srcPixFmt |
320 | << "dstSize:" << dstSize |
321 | << "dstPixFmt:" << dstPixFmt |
322 | << "conversionType:" << conversionType; |
323 | |
324 | return SwsContextUPtr(result); |
325 | } |
326 | |
327 | #ifdef Q_OS_DARWIN |
328 | bool isCVFormatSupported(uint32_t cvFormat) |
329 | { |
330 | return av_map_videotoolbox_format_to_pixfmt(cvFormat) != AV_PIX_FMT_NONE; |
331 | } |
332 | |
// Renders a CoreVideo FourCC as a 4-character string, most significant byte
// first (so 0x41424344 prints as "ABCD" on a little-endian host).
std::string cvFormatToString(uint32_t cvFormat)
{
    const char *bytes = reinterpret_cast<const char *>(&cvFormat);
    std::string result(4, '\0');
    for (int i = 0; i < 4; ++i)
        result[i] = bytes[3 - i];
    return result;
}
338 | |
339 | #endif |
340 | |
341 | #if QT_FFMPEG_HAS_AVCODEC_GET_SUPPORTED_CONFIG |
// Logs a failed avcodec_get_supported_config() query for \a codec: which
// config was requested and the FFmpeg error code, both numeric and as text.
void logGetCodecConfigError(const AVCodec *codec, AVCodecConfig config, int error)
{
    qCWarning(qLcFFmpegUtils) << "Failed to retrieve config" << config << "for codec" << codec->name
                              << "with error" << error << err2str(error);
}
347 | #endif |
348 | |
349 | } // namespace QFFmpeg |
350 | |
351 | QDebug operator<<(QDebug dbg, const AVRational &value) |
352 | { |
353 | dbg << value.num << "/" << value.den; |
354 | return dbg; |
355 | } |
356 | |
357 | #if QT_FFMPEG_HAS_AV_CHANNEL_LAYOUT |
358 | QDebug operator<<(QDebug dbg, const AVChannelLayout &layout) |
359 | { |
360 | dbg << '['; |
361 | dbg << "nb_channels:" << layout.nb_channels; |
362 | dbg << ", order:" << layout.order; |
363 | |
364 | if (layout.order == AV_CHANNEL_ORDER_NATIVE || layout.order == AV_CHANNEL_ORDER_AMBISONIC) |
365 | dbg << ", mask:" << Qt::bin << layout.u.mask << Qt::dec; |
366 | else if (layout.order == AV_CHANNEL_ORDER_CUSTOM && layout.u.map) |
367 | dbg << ", id: " << layout.u.map->id; |
368 | |
369 | dbg << ']'; |
370 | |
371 | return dbg; |
372 | } |
373 | #endif |
374 | |
375 | #if QT_FFMPEG_HAS_AVCODEC_GET_SUPPORTED_CONFIG |
376 | QDebug operator<<(QDebug dbg, const AVCodecConfig value) |
377 | { |
378 | switch (value) { |
379 | case AV_CODEC_CONFIG_CHANNEL_LAYOUT: |
380 | dbg << "AV_CODEC_CONFIG_CHANNEL_LAYOUT" ; |
381 | break; |
382 | case AV_CODEC_CONFIG_COLOR_RANGE: |
383 | dbg << "AV_CODEC_CONFIG_COLOR_RANGE" ; |
384 | break; |
385 | case AV_CODEC_CONFIG_COLOR_SPACE: |
386 | dbg << "AV_CODEC_CONFIG_COLOR_SPACE" ; |
387 | break; |
388 | case AV_CODEC_CONFIG_FRAME_RATE: |
389 | dbg << "AV_CODEC_CONFIG_FRAME_RATE" ; |
390 | break; |
391 | case AV_CODEC_CONFIG_PIX_FORMAT: |
392 | dbg << "AV_CODEC_CONFIG_PIX_FORMAT" ; |
393 | break; |
394 | case AV_CODEC_CONFIG_SAMPLE_FORMAT: |
395 | dbg << "AV_CODEC_CONFIG_SAMPLE_FORMAT" ; |
396 | break; |
397 | case AV_CODEC_CONFIG_SAMPLE_RATE: |
398 | dbg << "AV_CODEC_CONFIG_SAMPLE_RATE" ; |
399 | break; |
400 | default: |
401 | dbg << "<UNKNOWN_CODEC_CONFIG>" ; |
402 | break; |
403 | } |
404 | |
405 | return dbg; |
406 | } |
407 | #endif |
408 | |
409 | QT_END_NAMESPACE |
410 | |