// Copyright (C) 2022 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#include "qffmpegvideoframeencoder_p.h"
#include "qffmpegmediaformatinfo_p.h"
#include "qffmpegencoderoptions_p.h"
#include "qffmpegvideoencoderutils_p.h"
#include "qffmpegcodecstorage_p.h"
#include <qloggingcategory.h>
#include <QtMultimedia/private/qmaybe_p.h>

extern "C" {
#include "libavutil/display.h"
#include "libavutil/pixdesc.h"
}

QT_BEGIN_NAMESPACE

static Q_LOGGING_CATEGORY(qLcVideoFrameEncoder, "qt.multimedia.ffmpeg.videoencoder");

namespace QFFmpeg {

namespace {

AVCodecID avCodecID(const QMediaEncoderSettings &settings)
{
    const QMediaFormat::VideoCodec qVideoCodec = settings.videoCodec();
    return QFFmpegMediaFormatInfo::codecIdForVideoCodec(qVideoCodec);
}

} // namespace

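// Factory entry point: creates the output AVStream, then tries to find and open a suitable
// encoder. Hardware-accelerated encoders (matched against HWAccel::encodingDeviceTypes())
// are preferred; if none can be opened, the code falls back to a pure software encoder
// chosen by findSWFormatScores(). Returns nullptr if no encoder could be opened.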
VideoFrameEncoderUPtr VideoFrameEncoder::create(const QMediaEncoderSettings &encoderSettings,
                                                const SourceParams &sourceParams,
                                                AVFormatContext *formatContext)
{
    Q_ASSERT(isSwPixelFormat(sourceParams.swFormat));
    Q_ASSERT(isHwPixelFormat(sourceParams.format) || sourceParams.swFormat == sourceParams.format);

    AVStream *stream = createStream(sourceParams, formatContext);

    if (!stream)
        return nullptr;

    CreationResult result;

    auto findAndOpenAVEncoder = [&](const auto &scoresGetter, const auto &creator) {
        auto createWithTargetFormatFallback = [&](const Codec &codec) {
            result = creator(codec, AVPixelFormatSet{});
#ifdef Q_OS_ANDROID
            // On Android, some encoders fail to open with 4:2:0 formats unless the format
            // is NV12. Fall back to another format in that case.
            if (!result.encoder) {
                const auto targetFormatDesc = av_pix_fmt_desc_get(result.targetFormat);
                const bool is420TargetFormat = targetFormatDesc
                        && targetFormatDesc->log2_chroma_h == 1
                        && targetFormatDesc->log2_chroma_w == 1;
                if (is420TargetFormat && result.targetFormat != AV_PIX_FMT_NV12)
                    result = creator(codec, AVPixelFormatSet{ result.targetFormat });
            }
#endif
            return result.encoder != nullptr;
        };
        return QFFmpeg::findAndOpenAVEncoder(avCodecID(encoderSettings), scoresGetter,
                                             createWithTargetFormatFallback);
    };

    {
        const auto &deviceTypes = HWAccel::encodingDeviceTypes();

        auto findDeviceType = [&](const Codec &codec) {
            std::optional<AVPixelFormat> pixelFormat = findAVPixelFormat(codec, &isHwPixelFormat);
            if (!pixelFormat)
                return deviceTypes.end();

            return std::find_if(deviceTypes.begin(), deviceTypes.end(),
                                [pixelFormat](AVHWDeviceType deviceType) {
                                    return pixelFormatForHwDevice(deviceType) == pixelFormat;
                                });
        };

        findAndOpenAVEncoder(
                [&](const Codec &codec) {
                    const auto found = findDeviceType(codec);
                    if (found == deviceTypes.end())
                        return NotSuitableAVScore;

                    return DefaultAVScore - static_cast<AVScore>(found - deviceTypes.begin());
                },
                [&](const Codec &codec, const AVPixelFormatSet &prohibitedTargetFormats) {
                    HWAccelUPtr hwAccel = HWAccel::create(*findDeviceType(codec));
                    if (!hwAccel)
                        return CreationResult{};
                    if (!hwAccel->matchesSizeContraints(encoderSettings.videoResolution()))
                        return CreationResult{};
                    return create(stream, codec, std::move(hwAccel), sourceParams, encoderSettings,
                                  prohibitedTargetFormats);
                });
    }

    if (!result.encoder) {
        findAndOpenAVEncoder(
                [&](const Codec &codec) {
                    return findSWFormatScores(codec, sourceParams.swFormat);
                },
                [&](const Codec &codec, const AVPixelFormatSet &prohibitedTargetFormats) {
                    return create(stream, codec, nullptr, sourceParams, encoderSettings,
                                  prohibitedTargetFormats);
                });
    }

    if (auto &encoder = result.encoder)
        qCDebug(qLcVideoFrameEncoder)
                << "found" << (encoder->m_accel ? "hw" : "sw") << "encoder"
                << encoder->m_codec.name() << "for id" << encoder->m_codec.id();
    else
        qCWarning(qLcVideoFrameEncoder) << "No valid video codecs found";

    return std::move(result.encoder);
}

VideoFrameEncoder::VideoFrameEncoder(AVStream *stream, const Codec &codec, HWAccelUPtr hwAccel,
                                     const SourceParams &sourceParams,
                                     const QMediaEncoderSettings &encoderSettings)
    : m_settings(encoderSettings),
      m_stream(stream),
      m_codec(codec),
      m_accel(std::move(hwAccel)),
      m_sourceSize(sourceParams.size),
      m_sourceFormat(sourceParams.format),
      m_sourceSWFormat(sourceParams.swFormat)
{
}

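// Creates the muxer stream for the encoded video and copies the source color information
// (transfer, space, range) to the stream's codec parameters. If the source carries a rotation
// and/or a horizontal mirror, a DISPLAYMATRIX side-data entry is attached so that players can
// restore the original orientation.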
AVStream *VideoFrameEncoder::createStream(const SourceParams &sourceParams,
                                          AVFormatContext *formatContext)
{
    AVStream *stream = avformat_new_stream(formatContext, nullptr);

    if (!stream)
        return stream;

    stream->id = formatContext->nb_streams - 1;
    stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;

    stream->codecpar->color_trc = sourceParams.colorTransfer;
    stream->codecpar->color_space = sourceParams.colorSpace;
    stream->codecpar->color_range = sourceParams.colorRange;

    if (sourceParams.transform.rotation != QtVideo::Rotation::None
        || sourceParams.transform.mirrorredHorizontallyAfterRotation) {
        constexpr auto displayMatrixSize = sizeof(int32_t) * 9;
        AVPacketSideData sideData = { reinterpret_cast<uint8_t *>(av_malloc(displayMatrixSize)),
                                      displayMatrixSize, AV_PKT_DATA_DISPLAYMATRIX };
        int32_t *matrix = reinterpret_cast<int32_t *>(sideData.data);
        av_display_rotation_set(matrix, static_cast<double>(sourceParams.transform.rotation));
        if (sourceParams.transform.mirrorredHorizontallyAfterRotation)
            av_display_matrix_flip(matrix, sourceParams.transform.mirrorredHorizontallyAfterRotation,
                                   false);

        addStreamSideData(stream, sideData);
    }

    return stream;
}

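// Private factory used by create() above: constructs the encoder object and runs the
// initialization pipeline in order (target size, frame rate, target formats, stream
// parameters, codec context, open, conversions). If opening fails after the target format has
// been chosen, that format is still reported back in CreationResult so the caller can retry
// while prohibiting it (see the Android fallback in create()).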
VideoFrameEncoder::CreationResult
VideoFrameEncoder::create(AVStream *stream, const Codec &codec, HWAccelUPtr hwAccel,
                          const SourceParams &sourceParams,
                          const QMediaEncoderSettings &encoderSettings,
                          const AVPixelFormatSet &prohibitedTargetFormats)
{
    VideoFrameEncoderUPtr frameEncoder(new VideoFrameEncoder(stream, codec, std::move(hwAccel),
                                                             sourceParams, encoderSettings));
    frameEncoder->initTargetSize();

    frameEncoder->initCodecFrameRate();

    if (!frameEncoder->initTargetFormats(prohibitedTargetFormats))
        return {};

    frameEncoder->initStream();

    const AVPixelFormat targetFormat = frameEncoder->m_targetFormat;

    if (!frameEncoder->initCodecContext())
        return { nullptr, targetFormat };

    if (!frameEncoder->open())
        return { nullptr, targetFormat };

    frameEncoder->updateConversions();

    return { std::move(frameEncoder), targetFormat };
}

void VideoFrameEncoder::initTargetSize()
{
    m_targetSize = adjustVideoResolution(m_codec, m_settings.videoResolution());

#ifdef Q_OS_WINDOWS
    // TODO: investigate; there might be more encoders that do not support odd resolutions
    if (m_codec.name() == u"h264_mf") {
        auto makeEven = [](int size) { return size & ~1; };
        const QSize fixedSize(makeEven(m_targetSize.width()), makeEven(m_targetSize.height()));
        if (fixedSize != m_targetSize) {
            qCDebug(qLcVideoFrameEncoder) << "Fix odd video resolution for codec" << m_codec.name()
                                          << ":" << m_targetSize << "->" << fixedSize;
            m_targetSize = fixedSize;
        }
    }
#endif
}

void VideoFrameEncoder::initCodecFrameRate()
{
    const auto frameRates = m_codec.frameRates();
    if (qLcVideoFrameEncoder().isEnabled(QtDebugMsg))
        for (AVRational rate : frameRates)
            qCDebug(qLcVideoFrameEncoder) << "supported frame rate:" << rate;

    m_codecFrameRate = adjustFrameRate(frameRates, m_settings.videoFrameRate());
    qCDebug(qLcVideoFrameEncoder) << "Adjusted frame rate:" << m_codecFrameRate;
}

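// Chooses the pixel format the encoder is fed with (m_targetFormat) and, for hardware
// formats, the software format (m_targetSWFormat) used to upload data into the hardware
// frames context. For pure software encoding both end up being the same format.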
bool VideoFrameEncoder::initTargetFormats(const AVPixelFormatSet &prohibitedTargetFormats)
{
    const auto format = findTargetFormat(m_sourceFormat, m_sourceSWFormat, m_codec, m_accel.get(),
                                         prohibitedTargetFormats);

    if (!format) {
        qWarning() << "Could not find target format for codecId" << m_codec.id();
        return false;
    }

    m_targetFormat = *format;

    if (isHwPixelFormat(m_targetFormat)) {
        Q_ASSERT(m_accel);

        // Don't pass prohibitedTargetFormats here: m_targetSWFormat is the format from which
        // we load a hardware texture, and it doesn't affect the encoding itself.
        const auto swFormat = findTargetSWFormat(m_sourceSWFormat, m_codec, *m_accel);
        if (!swFormat) {
            qWarning() << "Cannot find software target format. sourceSWFormat:" << m_sourceSWFormat
                       << "targetFormat:" << m_targetFormat;
            return false;
        }

        m_targetSWFormat = *swFormat;

        m_accel->createFramesContext(m_targetSWFormat, m_targetSize);
        if (!m_accel->hwFramesContextAsBuffer())
            return false;
    } else {
        m_targetSWFormat = m_targetFormat;
    }

    return true;
}

VideoFrameEncoder::~VideoFrameEncoder() = default;

void VideoFrameEncoder::initStream()
{
    m_stream->codecpar->codec_id = m_codec.id();

    // Apple's HEVC decoders don't like the hev1 tag FFmpeg uses by default; use hvc1 as the
    // more commonly accepted tag.
    if (m_codec.id() == AV_CODEC_ID_HEVC)
        m_stream->codecpar->codec_tag = MKTAG('h', 'v', 'c', '1');
    else
        m_stream->codecpar->codec_tag = 0;

    // ### Fix hardcoded values
    m_stream->codecpar->format = m_targetFormat;
    m_stream->codecpar->width = m_targetSize.width();
    m_stream->codecpar->height = m_targetSize.height();
    m_stream->codecpar->sample_aspect_ratio = AVRational{ 1, 1 };
#if QT_CODEC_PARAMETERS_HAVE_FRAMERATE
    m_stream->codecpar->framerate = m_codecFrameRate;
#endif

    const auto frameRates = m_codec.frameRates();
    m_stream->time_base = adjustFrameTimeBase(frameRates, m_codecFrameRate);
}

bool VideoFrameEncoder::initCodecContext()
{
    Q_ASSERT(m_stream->codecpar->codec_id);

    m_codecContext.reset(avcodec_alloc_context3(m_codec.get()));
    if (!m_codecContext) {
        qWarning() << "Could not allocate codec context";
        return false;
    }

    // copies format, size, color params, framerate
    avcodec_parameters_to_context(m_codecContext.get(), m_stream->codecpar);
#if !QT_CODEC_PARAMETERS_HAVE_FRAMERATE
    m_codecContext->framerate = m_codecFrameRate;
#endif
    m_codecContext->time_base = m_stream->time_base;
    qCDebug(qLcVideoFrameEncoder) << "codecContext time base" << m_codecContext->time_base.num
                                  << m_codecContext->time_base.den;

    if (m_accel) {
        auto deviceContext = m_accel->hwDeviceContextAsBuffer();
        Q_ASSERT(deviceContext);
        m_codecContext->hw_device_ctx = av_buffer_ref(deviceContext);

        if (auto framesContext = m_accel->hwFramesContextAsBuffer())
            m_codecContext->hw_frames_ctx = av_buffer_ref(framesContext);
    }

    avcodec_parameters_from_context(m_stream->codecpar, m_codecContext.get());

    return true;
}

bool VideoFrameEncoder::open()
{
    Q_ASSERT(m_codecContext);

    AVDictionaryHolder opts;
    applyVideoEncoderOptions(m_settings, QByteArray{ m_codec.name() }, m_codecContext.get(), opts);
    applyExperimentalCodecOptions(m_codec, opts);

    const int res = avcodec_open2(m_codecContext.get(), m_codec.get(), opts);
    if (res < 0) {
        qCWarning(qLcVideoFrameEncoder)
                << "Couldn't open video encoder" << m_codec.name() << "; result:" << err2str(res);
        return false;
    }
    qCDebug(qLcVideoFrameEncoder) << "video codec opened" << res << "time base"
                                  << m_codecContext->time_base;
    return true;
}

qreal VideoFrameEncoder::codecFrameRate() const
{
    return m_codecFrameRate.den ? qreal(m_codecFrameRate.num) / m_codecFrameRate.den : 0.;
}

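// Converts a timestamp in microseconds to stream time_base units, rounding to the nearest
// unit. Illustrative example (values assumed for illustration only): with time_base = 1/30000,
// div = 1'000'000 * 1, and us = 33'333, the result is
// (33'333 * 30'000 + 500'000) / 1'000'000 = 1000, i.e. one frame interval at ~30 fps.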
qint64 VideoFrameEncoder::getPts(qint64 us) const
{
    qint64 div = 1'000'000 * m_stream->time_base.num;
    return div != 0 ? (us * m_stream->time_base.den + div / 2) / div : 0;
}

const AVRational &VideoFrameEncoder::getTimeBase() const
{
    return m_stream->time_base;
}

namespace {
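// Helper that owns the input frame and, optionally, a converted copy of it. sendFrame() uses
// it to chain up to three steps: download from a hardware surface, scale/convert in software,
// and upload into the encoder's hardware frames context. takeResultFrame() returns the last
// produced frame with the input frame's properties copied onto it.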
struct FrameConverter
{
    FrameConverter(AVFrameUPtr inputFrame) : m_inputFrame{ std::move(inputFrame) } { }

    int downloadFromHw()
    {
        AVFrameUPtr cpuFrame = makeAVFrame();

        int err = av_hwframe_transfer_data(cpuFrame.get(), currentFrame(), 0);
        if (err < 0) {
            qCDebug(qLcVideoFrameEncoder)
                    << "Error transferring frame data to surface." << err2str(err);
            return err;
        }

        setFrame(std::move(cpuFrame));
        return 0;
    }

    void convert(SwsContext *scaleContext, AVPixelFormat format, const QSize &size)
    {
        AVFrameUPtr scaledFrame = makeAVFrame();

        scaledFrame->format = format;
        scaledFrame->width = size.width();
        scaledFrame->height = size.height();

        av_frame_get_buffer(scaledFrame.get(), 0);

        const AVFrame *srcFrame = currentFrame();

        const auto scaledHeight =
                sws_scale(scaleContext, srcFrame->data, srcFrame->linesize, 0, srcFrame->height,
                          scaledFrame->data, scaledFrame->linesize);

        if (scaledHeight != scaledFrame->height)
            qCWarning(qLcVideoFrameEncoder)
                    << "Scaled height" << scaledHeight << "!=" << scaledFrame->height;

        setFrame(std::move(scaledFrame));
    }

    int uploadToHw(HWAccel *accel)
    {
        auto *hwFramesContext = accel->hwFramesContextAsBuffer();
        Q_ASSERT(hwFramesContext);
        AVFrameUPtr hwFrame = makeAVFrame();
        if (!hwFrame)
            return AVERROR(ENOMEM);

        int err = av_hwframe_get_buffer(hwFramesContext, hwFrame.get(), 0);
        if (err < 0) {
            qCDebug(qLcVideoFrameEncoder) << "Error getting HW buffer" << err2str(err);
            return err;
        } else {
            qCDebug(qLcVideoFrameEncoder) << "got HW buffer";
        }
        if (!hwFrame->hw_frames_ctx) {
            qCDebug(qLcVideoFrameEncoder) << "no hw frames context";
            return AVERROR(ENOMEM);
        }
        err = av_hwframe_transfer_data(hwFrame.get(), currentFrame(), 0);
        if (err < 0) {
            qCDebug(qLcVideoFrameEncoder)
                    << "Error transferring frame data to surface." << err2str(err);
            return err;
        }

        setFrame(std::move(hwFrame));

        return 0;
    }

    QMaybe<AVFrameUPtr, int> takeResultFrame()
    {
        // Ensure that the object is reset to an empty state
        AVFrameUPtr converted = std::move(m_convertedFrame);
        AVFrameUPtr input = std::move(m_inputFrame);

        if (!converted)
            return input;

        // Copy metadata except size and format from input frame
        const int status = av_frame_copy_props(converted.get(), input.get());
        if (status != 0)
            return status;

        return converted;
    }

private:
    void setFrame(AVFrameUPtr frame) { m_convertedFrame = std::move(frame); }

    AVFrame *currentFrame() const
    {
        if (m_convertedFrame)
            return m_convertedFrame.get();
        return m_inputFrame.get();
    }

    AVFrameUPtr m_inputFrame;
    AVFrameUPtr m_convertedFrame;
};
} // namespace

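// Feeds one frame to the encoder; a null frame flushes it. The source format/size bookkeeping
// is refreshed first in case the incoming frames changed, the frame is then run through the
// FrameConverter steps that updateConversions() enabled, and finally handed to
// avcodec_send_frame().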
int VideoFrameEncoder::sendFrame(AVFrameUPtr inputFrame)
{
    if (!m_codecContext) {
        qWarning() << "codec context is not initialized!";
        return AVERROR(EINVAL);
    }

    if (!inputFrame)
        return avcodec_send_frame(m_codecContext.get(), nullptr); // Flush

    if (!updateSourceFormatAndSize(inputFrame.get()))
        return AVERROR(EINVAL);

    FrameConverter converter{ std::move(inputFrame) };

    if (m_downloadFromHW) {
        const int status = converter.downloadFromHw();
        if (status != 0)
            return status;
    }

    if (m_scaleContext)
        converter.convert(m_scaleContext.get(), m_targetSWFormat, m_targetSize);

    if (m_uploadToHW) {
        const int status = converter.uploadToHw(m_accel.get());
        if (status != 0)
            return status;
    }

    const QMaybe<AVFrameUPtr, int> resultFrame = converter.takeResultFrame();
    if (!resultFrame)
        return resultFrame.error();

    AVRational timeBase{};
    int64_t pts{};
    getAVFrameTime(*resultFrame.value(), pts, timeBase);
    qCDebug(qLcVideoFrameEncoder) << "sending frame" << pts << "*" << timeBase;

    return avcodec_send_frame(m_codecContext.get(), resultFrame.value().get());
}

qint64 VideoFrameEncoder::estimateDuration(const AVPacket &packet, bool isFirstPacket)
{
    qint64 duration = 0; // In stream units, multiply by time_base to get seconds

    if (isFirstPacket) {
        // First packet: estimate the duration from the frame rate. The duration must be set
        // for single-frame videos, otherwise they won't open in the media player.
        const AVRational frameDuration = av_inv_q(m_codecContext->framerate);
        duration = av_rescale_q(1, frameDuration, m_stream->time_base);
    } else {
        // Duration is calculated from actual packet times. TODO: Handle discontinuities
        duration = packet.pts - m_lastPacketTime;
    }

    return duration;
}

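// Pulls the next encoded packet from the codec, assigns the stream index, fills in a missing
// duration via estimateDuration(), and works around encoders that emit packets with
// pts < dts before returning the packet to the muxer. Packets whose dts cannot be fixed are
// skipped; nullptr is returned when no packet is available.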
AVPacketUPtr VideoFrameEncoder::retrievePacket()
{
    if (!m_codecContext)
        return nullptr;

    auto getPacket = [&]() {
        AVPacketUPtr packet(av_packet_alloc());
        const int ret = avcodec_receive_packet(m_codecContext.get(), packet.get());
        if (ret < 0) {
            if (ret != AVERROR(EOF) && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
                qCDebug(qLcVideoFrameEncoder) << "Error receiving packet" << ret << err2str(ret);
            return AVPacketUPtr{};
        }
        auto ts = timeStampMs(packet->pts, m_stream->time_base);

        qCDebug(qLcVideoFrameEncoder)
                << "got a packet" << packet->pts << packet->dts << (ts ? *ts : 0);

        packet->stream_index = m_stream->id;

        if (packet->duration == 0) {
            const bool firstFrame = m_lastPacketTime == AV_NOPTS_VALUE;
            packet->duration = estimateDuration(*packet, firstFrame);
        }

        m_lastPacketTime = packet->pts;

        return packet;
    };

    auto fixPacketDts = [&](AVPacket &packet) {
        // Workaround for bugs in some FFmpeg encoders (e.g. nvenc):
        // packet->pts < packet->dts is not expected, so it is fixed up here.

        if (packet.dts == AV_NOPTS_VALUE)
            return true;

        packet.dts -= m_packetDtsOffset;

        if (packet.pts != AV_NOPTS_VALUE && packet.pts < packet.dts) {
            m_packetDtsOffset += packet.dts - packet.pts;
            packet.dts = packet.pts;

            if (m_prevPacketDts != AV_NOPTS_VALUE && packet.dts < m_prevPacketDts) {
                qCWarning(qLcVideoFrameEncoder)
                        << "Skip packet; failed to fix dts:" << packet.dts << m_prevPacketDts;
                return false;
            }
        }

        m_prevPacketDts = packet.dts;

        return true;
    };

    while (auto packet = getPacket()) {
        if (fixPacketDts(*packet))
            return packet;
    }

    return nullptr;
}

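// Called for every incoming frame: if the frame's size or pixel format differs from what the
// converters were set up for, the cached source parameters are updated and the conversion
// pipeline is rebuilt. For hardware frames the software format is read from the attached
// hw_frames_ctx.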
bool VideoFrameEncoder::updateSourceFormatAndSize(const AVFrame *frame)
{
    Q_ASSERT(frame);

    const QSize frameSize(frame->width, frame->height);
    const AVPixelFormat frameFormat = static_cast<AVPixelFormat>(frame->format);

    if (frameSize == m_sourceSize && frameFormat == m_sourceFormat)
        return true;

    auto applySourceFormatAndSize = [&](AVPixelFormat swFormat) {
        m_sourceSize = frameSize;
        m_sourceFormat = frameFormat;
        m_sourceSWFormat = swFormat;
        updateConversions();
        return true;
    };

    if (frameFormat == m_sourceFormat)
        return applySourceFormatAndSize(m_sourceSWFormat);

    if (frameFormat == AV_PIX_FMT_NONE) {
        qWarning() << "Got a frame with invalid pixel format";
        return false;
    }

    if (isSwPixelFormat(frameFormat))
        return applySourceFormatAndSize(frameFormat);

    auto framesCtx = reinterpret_cast<const AVHWFramesContext *>(frame->hw_frames_ctx->data);
    if (!framesCtx || framesCtx->sw_format == AV_PIX_FMT_NONE) {
        qWarning() << "Cannot update conversions as hw frame has invalid framesCtx" << framesCtx;
        return false;
    }

    return applySourceFormatAndSize(framesCtx->sw_format);
}

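// Decides which FrameConverter steps sendFrame() needs: nothing for a zero-copy match of
// source and target formats, otherwise a hardware download and/or upload plus an SwsContext
// when the software formats or the sizes differ.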
void VideoFrameEncoder::updateConversions()
{
    const bool needToScale = m_sourceSize != m_targetSize;
    const bool zeroCopy = m_sourceFormat == m_targetFormat && !needToScale;

    m_scaleContext.reset();

    if (zeroCopy) {
        m_downloadFromHW = false;
        m_uploadToHW = false;

        qCDebug(qLcVideoFrameEncoder) << "zero copy encoding, format" << m_targetFormat;
        // no need to initialize any converters
        return;
    }

    m_downloadFromHW = m_sourceFormat != m_sourceSWFormat;
    m_uploadToHW = m_targetFormat != m_targetSWFormat;

    if (m_sourceSWFormat != m_targetSWFormat || needToScale) {
        qCDebug(qLcVideoFrameEncoder)
                << "video source and encoder use different formats:" << m_sourceSWFormat
                << m_targetSWFormat << "or sizes:" << m_sourceSize << m_targetSize;

        const int conversionType = getScaleConversionType(m_sourceSize, m_targetSize);

        m_scaleContext = createSwsContext(m_sourceSize, m_sourceSWFormat, m_targetSize,
                                          m_targetSWFormat, conversionType);
    }

    qCDebug(qLcVideoFrameEncoder) << "VideoFrameEncoder conversions initialized:"
                                  << "sourceFormat:" << m_sourceFormat
                                  << (isHwPixelFormat(m_sourceFormat) ? "(hw)" : "(sw)")
                                  << "targetFormat:" << m_targetFormat
                                  << (isHwPixelFormat(m_targetFormat) ? "(hw)" : "(sw)")
                                  << "sourceSWFormat:" << m_sourceSWFormat
                                  << "targetSWFormat:" << m_targetSWFormat
                                  << "scaleContext:" << m_scaleContext.get();
}

} // namespace QFFmpeg

QT_END_NAMESPACE

