1 | // Copyright (C) 2021 The Qt Company Ltd. |
2 | // SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only |
3 | #include "qffmpegrecordingengine_p.h" |
4 | #include "qffmpegencodinginitializer_p.h" |
5 | #include "qffmpegaudioencoder_p.h" |
6 | #include "qffmpegaudioinput_p.h" |
7 | #include "qffmpegrecordingengineutils_p.h" |
8 | |
9 | #include "private/qmultimediautils_p.h" |
10 | #include "private/qplatformaudiobufferinput_p.h" |
11 | #include "private/qplatformvideosource_p.h" |
12 | #include "private/qplatformvideoframeinput_p.h" |
13 | |
14 | #include "qdebug.h" |
15 | #include "qffmpegvideoencoder_p.h" |
16 | #include "qffmpegmediametadata_p.h" |
17 | #include "qffmpegmuxer_p.h" |
18 | #include "qloggingcategory.h" |
19 | |
20 | QT_BEGIN_NAMESPACE |
21 | |
static Q_LOGGING_CATEGORY(qLcFFmpegEncoder, "qt.multimedia.ffmpeg.encoder");
23 | |
24 | namespace QFFmpeg |
25 | { |
26 | |
27 | RecordingEngine::RecordingEngine(const QMediaEncoderSettings &settings, |
28 | std::unique_ptr<EncodingFormatContext> context) |
29 | : m_settings(settings), m_formatContext(std::move(context)), m_muxer(new Muxer(this)) |
30 | { |
31 | Q_ASSERT(m_formatContext); |
32 | Q_ASSERT(m_formatContext->isAVIOOpen()); |
33 | } |
34 | |
35 | RecordingEngine::~RecordingEngine() |
36 | { |
37 | Q_ASSERT(m_state == State::Finalization); |
38 | } |
39 | |
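// Adds a device-backed audio input: rejects a null device or an invalid preferred format,
// creates an AudioEncoder for that format, connects it to the input and starts capturing.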
40 | void RecordingEngine::addAudioInput(QFFmpegAudioInput *input) |
41 | { |
42 | Q_ASSERT(input); |
43 | Q_ASSERT(m_state == State::FormatsInitialization); |
44 | |
45 | if (input->device.isNull()) { |
        emit streamInitializationError(QMediaRecorder::ResourceError,
                                       QLatin1StringView("Audio device is null"));
48 | return; |
49 | } |
50 | |
51 | const QAudioFormat format = input->device.preferredFormat(); |
52 | |
53 | if (!format.isValid()) { |
        emit streamInitializationError(
                QMediaRecorder::FormatError,
                QLatin1StringView("Audio device has invalid preferred format"));
57 | return; |
58 | } |
59 | |
60 | AudioEncoder *audioEncoder = createAudioEncoder(format); |
    connectEncoderToSource(audioEncoder, input);
62 | |
63 | input->setRunning(true); |
64 | } |
65 | |
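// Adds a push-mode audio buffer input; the encoding format is taken from the first buffer
// if it is valid, otherwise from the input's own audio format.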
66 | void RecordingEngine::addAudioBufferInput(QPlatformAudioBufferInput *input, |
67 | const QAudioBuffer &firstBuffer) |
68 | { |
69 | Q_ASSERT(input); |
70 | Q_ASSERT(m_state == State::FormatsInitialization); |
71 | |
72 | const QAudioFormat format = firstBuffer.isValid() ? firstBuffer.format() : input->audioFormat(); |
73 | |
74 | AudioEncoder *audioEncoder = createAudioEncoder(format); |
75 | |
76 | // set the buffer before connecting to avoid potential races |
77 | if (firstBuffer.isValid()) |
        audioEncoder->addBuffer(firstBuffer);
79 | |
    connectEncoderToSource(audioEncoder, input);
81 | } |
82 | |
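// Creates an AudioEncoder for the given format, stores it, and wires its end-of-stream
// and initialization signals to the engine.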
83 | AudioEncoder *RecordingEngine::createAudioEncoder(const QAudioFormat &format) |
84 | { |
85 | Q_ASSERT(format.isValid()); |
86 | |
87 | auto audioEncoder = new AudioEncoder(*this, format, m_settings); |
88 | |
    m_audioEncoders.emplace_back(audioEncoder);
    connect(audioEncoder, &EncoderThread::endOfSourceStream, this,
            &RecordingEngine::handleSourceEndOfStream);
    connect(audioEncoder, &EncoderThread::initialized, this,
            &RecordingEngine::handleEncoderInitialization, Qt::SingleShotConnection);
94 | if (m_autoStop) |
95 | audioEncoder->setAutoStop(true); |
96 | |
97 | return audioEncoder; |
98 | } |
99 | |
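// Adds a video source: derives the frame format from the first frame if available (estimating
// the frame rate from its timestamps when missing) and creates a VideoEncoder for it.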
100 | void RecordingEngine::addVideoSource(QPlatformVideoSource *source, const QVideoFrame &firstFrame) |
101 | { |
102 | Q_ASSERT(m_state == State::FormatsInitialization); |
103 | |
104 | QVideoFrameFormat frameFormat = |
105 | firstFrame.isValid() ? firstFrame.surfaceFormat() : source->frameFormat(); |
106 | |
107 | Q_ASSERT(frameFormat.isValid()); |
108 | |
109 | if (firstFrame.isValid() && frameFormat.streamFrameRate() <= 0.f) { |
110 | const qint64 startTime = firstFrame.startTime(); |
111 | const qint64 endTime = firstFrame.endTime(); |
112 | if (startTime != -1 && endTime > startTime) |
113 | frameFormat.setStreamFrameRate(static_cast<qreal>(VideoFrameTimeBase) |
114 | / (endTime - startTime)); |
115 | } |
116 | |
117 | std::optional<AVPixelFormat> hwPixelFormat = source->ffmpegHWPixelFormat() |
118 | ? AVPixelFormat(*source->ffmpegHWPixelFormat()) |
119 | : std::optional<AVPixelFormat>{}; |
120 | |
121 | qCDebug(qLcFFmpegEncoder) << "adding video source" << source->metaObject()->className() << ":" |
122 | << "pixelFormat=" << frameFormat.pixelFormat() |
123 | << "frameSize=" << frameFormat.frameSize() |
124 | << "frameRate=" << frameFormat.streamFrameRate() |
125 | << "ffmpegHWPixelFormat=" << (hwPixelFormat ? *hwPixelFormat : AV_PIX_FMT_NONE); |
126 | |
127 | auto videoEncoder = new VideoEncoder(*this, m_settings, frameFormat, hwPixelFormat); |
    m_videoEncoders.emplace_back(videoEncoder);
129 | if (m_autoStop) |
130 | videoEncoder->setAutoStop(true); |
131 | |
    connect(videoEncoder, &EncoderThread::endOfSourceStream, this,
            &RecordingEngine::handleSourceEndOfStream);

    connect(videoEncoder, &EncoderThread::initialized, this,
            &RecordingEngine::handleEncoderInitialization, Qt::SingleShotConnection);
137 | |
138 | // set the frame before connecting to avoid potential races |
139 | if (firstFrame.isValid()) |
        videoEncoder->addFrame(firstFrame);
141 | |
    connectEncoderToSource(videoEncoder, source);
143 | } |
144 | |
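// Called once the EncodingInitializer has determined all stream formats; drops the initializer
// and starts the encoder threads, or reports an error if no stream could be set up.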
145 | void RecordingEngine::handleFormatsInitialization() |
146 | { |
147 | Q_ASSERT(m_state == State::FormatsInitialization); |
148 | Q_ASSERT(m_formatsInitializer); |
149 | m_formatsInitializer.reset(); |
150 | |
151 | if (m_audioEncoders.empty() && m_videoEncoders.empty()) { |
        emit sessionError(QMediaRecorder::ResourceError,
                          QLatin1StringView("No valid stream found for encoding"));
154 | return; |
155 | } |
156 | |
157 | m_state = State::EncodersInitialization; |
158 | |
159 | qCDebug(qLcFFmpegEncoder) << "RecordingEngine::start!" ; |
160 | |
    forEachEncoder([](EncoderThread *encoder) { encoder->start(); });
162 | } |
163 | |
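// Entry point: starts format initialization for all given audio and video sources.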
164 | void RecordingEngine::initialize(const std::vector<QPlatformAudioBufferInputBase *> &audioSources, |
165 | const std::vector<QPlatformVideoSource *> &videoSources) |
166 | { |
167 | qCDebug(qLcFFmpegEncoder) << ">>>>>>>>>>>>>>> initialize" ; |
168 | Q_ASSERT(m_state == State::None); |
169 | |
170 | m_state = State::FormatsInitialization; |
    m_formatsInitializer = std::make_unique<EncodingInitializer>(*this);
172 | m_formatsInitializer->start(audioSources, videoSources); |
173 | } |
174 | |
175 | RecordingEngine::EncodingFinalizer::EncodingFinalizer(RecordingEngine &recordingEngine, |
176 | bool writeTrailer) |
177 | : m_recordingEngine(recordingEngine), m_writeTrailer(writeTrailer) |
178 | { |
179 | Q_ASSERT(m_recordingEngine.m_state == State::Finalization); |
    connect(this, &QThread::finished, this, &QObject::deleteLater);
181 | } |
182 | |
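// Runs on the finalizer thread: stops and deletes the encoder and muxer threads, writes the
// trailer when requested, closes AVIO, and finally deletes the recording engine itself.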
183 | void RecordingEngine::EncodingFinalizer::run() |
184 | { |
185 | Q_ASSERT(m_recordingEngine.m_state == State::Finalization); |
186 | |
187 | m_recordingEngine.stopAndDeleteThreads(); |
188 | |
189 | if (m_writeTrailer) { |
        const int res = av_write_trailer(m_recordingEngine.avFormatContext());
191 | if (res < 0) { |
            const auto errorDescription = err2str(res);
            qCWarning(qLcFFmpegEncoder) << "could not write trailer" << res << errorDescription;
            emit m_recordingEngine.sessionError(QMediaRecorder::FormatError,
                                                QLatin1String("Cannot write trailer: ")
                                                        + errorDescription);
197 | } |
198 | } |
199 | // else ffmpeg might crash |
200 | |
201 | // close AVIO before emitting finalizationDone. |
202 | m_recordingEngine.m_formatContext->closeAVIO(); |
203 | |
204 | qCDebug(qLcFFmpegEncoder) << " done finalizing." ; |
205 | emit m_recordingEngine.finalizationDone(); |
206 | auto recordingEnginePtr = &m_recordingEngine; |
207 | delete recordingEnginePtr; |
208 | } |
209 | |
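// Begins shutdown: disconnects encoders from their sources and hands the remaining work to an
// EncodingFinalizer thread; the trailer is only written if encoding had actually started.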
210 | void RecordingEngine::finalize() |
211 | { |
212 | qCDebug(qLcFFmpegEncoder) << ">>>>>>>>>>>>>>> finalize" ; |
213 | |
214 | Q_ASSERT(m_state == State::FormatsInitialization || m_state == State::EncodersInitialization |
215 | || m_state == State::Encoding); |
216 | |
217 | Q_ASSERT((m_state == State::FormatsInitialization) == !!m_formatsInitializer); |
218 | |
219 | m_formatsInitializer.reset(); |
220 | |
    forEachEncoder(&disconnectEncoderFromSource);
    if (m_state != State::Encoding)
        forEachEncoder(&EncoderThread::startEncoding, false);
224 | |
225 | const bool shouldWriteTrailer = m_state == State::Encoding; |
226 | m_state = State::Finalization; |
227 | |
228 | EncodingFinalizer *finalizer = new EncodingFinalizer(*this, shouldWriteTrailer); |
229 | finalizer->start(); |
230 | } |
231 | |
232 | void RecordingEngine::setPaused(bool paused) |
233 | { |
    forEachEncoder(&EncoderThread::setPaused, paused);
235 | } |
236 | |
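// Propagates the auto-stop flag to all encoders and re-checks whether all source streams
// have already ended.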
237 | void RecordingEngine::setAutoStop(bool autoStop) |
238 | { |
239 | m_autoStop = autoStop; |
    forEachEncoder(&EncoderThread::setAutoStop, autoStop);
241 | handleSourceEndOfStream(); |
242 | } |
243 | |
244 | void RecordingEngine::setMetaData(const QMediaMetaData &metaData) |
245 | { |
246 | m_metaData = metaData; |
247 | } |
248 | |
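// Records the largest timestamp seen so far and reports it as the current duration.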
249 | void RecordingEngine::newTimeStamp(qint64 time) |
250 | { |
251 | QMutexLocker locker(&m_timeMutex); |
252 | if (time > m_timeRecorded) { |
253 | m_timeRecorded = time; |
        emit durationChanged(time);
255 | } |
256 | } |
257 | |
258 | bool RecordingEngine::isEndOfSourceStreams() const |
259 | { |
    return allOfEncoders(&EncoderThread::isEndOfSourceStream);
261 | } |
262 | |
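// Emits autoStopped() once every encoder has reached the end of its source stream while
// auto-stop is enabled.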
263 | void RecordingEngine::handleSourceEndOfStream() |
264 | { |
265 | if (m_autoStop && isEndOfSourceStreams()) |
266 | emit autoStopped(); |
267 | } |
268 | |
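// Counts initialized encoders; once all are ready, writes the stream header with the current
// metadata and switches the muxer and encoders into the Encoding state.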
269 | void RecordingEngine::handleEncoderInitialization() |
270 | { |
271 | Q_ASSERT(m_state == State::EncodersInitialization || m_state == State::Finalization); |
272 | |
273 | if (m_state == State::Finalization) |
274 | return; // outdated event, drop it |
275 | |
276 | ++m_initializedEncodersCount; |
277 | |
278 | Q_ASSERT(m_initializedEncodersCount <= encodersCount()); |
279 | |
280 | if (m_initializedEncodersCount < encodersCount()) |
281 | return; |
282 | |
283 | Q_ASSERT(allOfEncoders(&EncoderThread::isInitialized)); |
284 | |
285 | qCDebug(qLcFFmpegEncoder) << "Encoders initialized; writing a header" ; |
286 | |
    avFormatContext()->metadata = QFFmpegMetaData::toAVMetaData(m_metaData);
288 | |
    const int res = avformat_write_header(avFormatContext(), nullptr);
290 | if (res < 0) { |
291 | qWarning() << "could not write header, error:" << res << err2str(errnum: res); |
292 | emit sessionError(code: QMediaRecorder::ResourceError, |
293 | description: QLatin1StringView("Cannot start writing the stream" )); |
294 | return; |
295 | } |
296 | |
297 | qCDebug(qLcFFmpegEncoder) << "stream header is successfully written" ; |
298 | |
299 | m_state = State::Encoding; |
300 | m_muxer->start(); |
    forEachEncoder(&EncoderThread::startEncoding, true);
302 | } |
303 | |
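// Destroys all encoders and the muxer; as the name suggests, this stops and deletes their
// worker threads.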
304 | void RecordingEngine::stopAndDeleteThreads() |
305 | { |
306 | m_audioEncoders.clear(); |
307 | m_videoEncoders.clear(); |
308 | m_muxer.reset(); |
309 | } |
310 | |
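// Invokes f on every audio and video encoder, forwarding any extra arguments.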
311 | template <typename F, typename... Args> |
312 | void RecordingEngine::forEachEncoder(F &&f, Args &&...args) |
313 | { |
314 | for (auto &audioEncoder : m_audioEncoders) |
315 | std::invoke(f, audioEncoder.get(), args...); |
316 | for (auto &videoEncoder : m_videoEncoders) |
317 | std::invoke(f, videoEncoder.get(), args...); |
318 | } |
319 | |
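// Returns true if the predicate f holds for every audio and video encoder.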
320 | template <typename F> |
321 | bool RecordingEngine::allOfEncoders(F &&f) const |
322 | { |
323 | auto predicate = [&f](const auto &encoder) { return std::invoke(f, encoder.get()); }; |
324 | |
325 | return std::all_of(m_audioEncoders.cbegin(), m_audioEncoders.cend(), predicate) |
326 | && std::all_of(m_videoEncoders.cbegin(), m_videoEncoders.cend(), predicate); |
327 | } |
328 | } |
329 | |
330 | QT_END_NAMESPACE |
331 | |
332 | #include "moc_qffmpegrecordingengine_p.cpp" |
333 | |