// Copyright (C) 2016 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#include "qffmpegmediacapturesession_p.h"

#include "private/qplatformaudioinput_p.h"
#include "private/qplatformaudiooutput_p.h"
#include "private/qplatformsurfacecapture_p.h"
#include "private/qplatformaudiobufferinput_p.h"
#include "private/qplatformvideoframeinput_p.h"
#include "private/qplatformcamera_p.h"

#include "qffmpegimagecapture_p.h"
#include "qffmpegmediarecorder_p.h"
#include "qvideosink.h"
#include "qffmpegaudioinput_p.h"
#include "qaudiosink.h"
#include "qaudiobuffer.h"
#include "qaudiooutput.h"

#include <qloggingcategory.h>

QT_BEGIN_NAMESPACE

static Q_LOGGING_CATEGORY(qLcFFmpegMediaCaptureSession, "qt.multimedia.ffmpeg.mediacapturesession")

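// Picks a sink buffer size comfortably larger than the capture input's buffer
// (factor of 2 plus a fixed margin) so the monitoring sink is unlikely to underrun.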
static int preferredAudioSinkBufferSize(const QFFmpegAudioInput &input)
{
    // Heuristic params to avoid jittering
    // TODO: investigate the reason for the jittering and possibly reduce the factor
    constexpr int BufferSizeFactor = 2;
    constexpr int BufferSizeExceeding = 4096;

    return input.bufferSize() * BufferSizeFactor + BufferSizeExceeding;
}

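// The constructor wires primary-video-source changes to updateVideoFrameConnection(),
// so the preview sink always receives frames from the current primary source.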
QFFmpegMediaCaptureSession::QFFmpegMediaCaptureSession()
{
    connect(this, &QFFmpegMediaCaptureSession::primaryActiveVideoSourceChanged, this,
            &QFFmpegMediaCaptureSession::updateVideoFrameConnection);
}

QFFmpegMediaCaptureSession::~QFFmpegMediaCaptureSession() = default;

QPlatformCamera *QFFmpegMediaCaptureSession::camera()
{
    return m_camera;
}

void QFFmpegMediaCaptureSession::setCamera(QPlatformCamera *camera)
{
    if (setVideoSource(m_camera, camera))
        emit cameraChanged();
}

QPlatformSurfaceCapture *QFFmpegMediaCaptureSession::screenCapture()
{
    return m_screenCapture;
}

void QFFmpegMediaCaptureSession::setScreenCapture(QPlatformSurfaceCapture *screenCapture)
{
    if (setVideoSource(m_screenCapture, screenCapture))
        emit screenCaptureChanged();
}

QPlatformSurfaceCapture *QFFmpegMediaCaptureSession::windowCapture()
{
    return m_windowCapture;
}

void QFFmpegMediaCaptureSession::setWindowCapture(QPlatformSurfaceCapture *windowCapture)
{
    if (setVideoSource(m_windowCapture, windowCapture))
        emit windowCaptureChanged();
}

QPlatformVideoFrameInput *QFFmpegMediaCaptureSession::videoFrameInput()
{
    return m_videoFrameInput;
}

void QFFmpegMediaCaptureSession::setVideoFrameInput(QPlatformVideoFrameInput *input)
{
    if (setVideoSource(m_videoFrameInput, input))
        emit videoFrameInputChanged();
}

QPlatformImageCapture *QFFmpegMediaCaptureSession::imageCapture()
{
    return m_imageCapture;
}

void QFFmpegMediaCaptureSession::setImageCapture(QPlatformImageCapture *imageCapture)
{
    if (m_imageCapture == imageCapture)
        return;

    if (m_imageCapture)
        m_imageCapture->setCaptureSession(nullptr);

    m_imageCapture = static_cast<QFFmpegImageCapture *>(imageCapture);

    if (m_imageCapture)
        m_imageCapture->setCaptureSession(this);

    emit imageCaptureChanged();
}

void QFFmpegMediaCaptureSession::setMediaRecorder(QPlatformMediaRecorder *recorder)
{
    auto *r = static_cast<QFFmpegMediaRecorder *>(recorder);
    if (m_mediaRecorder == r)
        return;

    if (m_mediaRecorder)
        m_mediaRecorder->setCaptureSession(nullptr);
    m_mediaRecorder = r;
    if (m_mediaRecorder)
        m_mediaRecorder->setCaptureSession(this);

    emit encoderChanged();
}

QPlatformMediaRecorder *QFFmpegMediaCaptureSession::mediaRecorder()
{
    return m_mediaRecorder;
}

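// Swaps the FFmpeg audio input and rebuilds the monitoring audio sink; the sink is
// also rebuilt whenever the input's device changes.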
void QFFmpegMediaCaptureSession::setAudioInput(QPlatformAudioInput *input)
{
    qCDebug(qLcFFmpegMediaCaptureSession)
            << "set audio input:" << (input ? input->device.description() : "null");

    auto ffmpegAudioInput = dynamic_cast<QFFmpegAudioInput *>(input);
    Q_ASSERT(!!input == !!ffmpegAudioInput);

    if (m_audioInput == ffmpegAudioInput)
        return;

    if (m_audioInput)
        m_audioInput->q->disconnect(this);

    m_audioInput = ffmpegAudioInput;
    if (m_audioInput)
        // TODO: implement the signal in QPlatformAudioInput and connect to it, QTBUG-112294
        connect(m_audioInput->q, &QAudioInput::deviceChanged, this,
                &QFFmpegMediaCaptureSession::updateAudioSink);

    updateAudioSink();
}

void QFFmpegMediaCaptureSession::setAudioBufferInput(QPlatformAudioBufferInput *input)
{
    // TODO: implement binding to audio sink like setAudioInput does
    m_audioBufferInput = input;
}

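// Tears down any existing monitoring sink, then, if both an audio input and an audio
// output are set, creates a QAudioSink for the input's preferred format, starts it in
// push mode, and forwards captured audio buffers to the sink's QIODevice.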
void QFFmpegMediaCaptureSession::updateAudioSink()
{
    if (m_audioSink) {
        m_audioSink->reset();
        m_audioSink.reset();
    }

    if (!m_audioInput || !m_audioOutput)
        return;

    auto format = m_audioInput->device.preferredFormat();

    if (!m_audioOutput->device.isFormatSupported(format))
        qWarning() << "Audio source format" << format << "is not compatible with the audio output";

    m_audioSink = std::make_unique<QAudioSink>(m_audioOutput->device, format);

    m_audioBufferSize = preferredAudioSinkBufferSize(*m_audioInput);
    m_audioSink->setBufferSize(m_audioBufferSize);

    qCDebug(qLcFFmpegMediaCaptureSession)
            << "Create audiosink, format:" << format << "bufferSize:" << m_audioSink->bufferSize()
            << "output device:" << m_audioOutput->device.description();

    m_audioIODevice = m_audioSink->start();
    if (m_audioIODevice) {
        auto writeToDevice = [this](const QAudioBuffer &buffer) {
            if (m_audioBufferSize < preferredAudioSinkBufferSize(*m_audioInput)) {
                qCDebug(qLcFFmpegMediaCaptureSession)
                        << "Recreate audiosink due to small buffer size:" << m_audioBufferSize;

                updateAudioSink();
            }

            const auto written =
                    m_audioIODevice->write(buffer.data<const char>(), buffer.byteCount());

            if (written < buffer.byteCount())
                qCWarning(qLcFFmpegMediaCaptureSession)
                        << "Not all bytes written:" << written << "vs" << buffer.byteCount();
        };
        connect(m_audioInput, &QFFmpegAudioInput::newAudioBuffer, m_audioSink.get(), writeToDevice);
    } else {
        qWarning() << "Failed to start audiosink push mode";
    }

    updateVolume();
}

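// Mirrors the audio output's mute/volume state onto the monitoring sink.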
void QFFmpegMediaCaptureSession::updateVolume()
{
    if (m_audioSink)
        m_audioSink->setVolume(m_audioOutput->muted ? 0.f : m_audioOutput->volume);
}

QPlatformAudioInput *QFFmpegMediaCaptureSession::audioInput() const
{
    return m_audioInput;
}

void QFFmpegMediaCaptureSession::setVideoPreview(QVideoSink *sink)
{
    if (std::exchange(m_videoSink, sink) == sink)
        return;

    updateVideoFrameConnection();
}

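// Swaps the audio output used for monitoring and tracks its device, volume and mute
// changes so the sink can be rebuilt or adjusted accordingly.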
void QFFmpegMediaCaptureSession::setAudioOutput(QPlatformAudioOutput *output)
{
    qCDebug(qLcFFmpegMediaCaptureSession)
            << "set audio output:" << (output ? output->device.description() : "null");

    if (m_audioOutput == output)
        return;

    if (m_audioOutput)
        m_audioOutput->q->disconnect(this);

    m_audioOutput = output;
    if (m_audioOutput) {
        // TODO: implement the signals in QPlatformAudioOutput and connect to them, QTBUG-112294
        connect(m_audioOutput->q, &QAudioOutput::deviceChanged, this,
                &QFFmpegMediaCaptureSession::updateAudioSink);
        connect(m_audioOutput->q, &QAudioOutput::volumeChanged, this,
                &QFFmpegMediaCaptureSession::updateVolume);
        connect(m_audioOutput->q, &QAudioOutput::mutedChanged, this,
                &QFFmpegMediaCaptureSession::updateVolume);
    }

    updateAudioSink();
}

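// Re-routes newVideoFrame from the current primary active video source to the preview
// sink, dropping the previous connection first.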
void QFFmpegMediaCaptureSession::updateVideoFrameConnection()
{
    disconnect(m_videoFrameConnection);

    if (m_primaryActiveVideoSource && m_videoSink) {
        // Deliver frames directly to the video sink;
        // the AutoConnection type might be a pessimization due to extra queuing.
        // TODO: investigate and integrate direct connection
        m_videoFrameConnection =
                connect(m_primaryActiveVideoSource, &QPlatformVideoSource::newVideoFrame,
                        m_videoSink, &QVideoSink::setVideoFrame);
    }
}

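// Treats the first active video source as the primary one and notifies listeners when
// the choice changes.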
void QFFmpegMediaCaptureSession::updatePrimaryActiveVideoSource()
{
    auto sources = activeVideoSources();
    auto source = sources.empty() ? nullptr : sources.front();
    if (std::exchange(m_primaryActiveVideoSource, source) != source)
        emit primaryActiveVideoSourceChanged();
}

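// Generic helper shared by the camera/screen/window/frame-input setters: detaches the
// previous source from the session, attaches the new one, and keeps the primary active
// video source up to date as sources change state or get destroyed.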
template<typename VideoSource>
bool QFFmpegMediaCaptureSession::setVideoSource(QPointer<VideoSource> &source,
                                                VideoSource *newSource)
{
    if (source == newSource)
        return false;

    if (auto prevSource = std::exchange(source, newSource)) {
        prevSource->setCaptureSession(nullptr);
        prevSource->disconnect(this);
    }

    if (source) {
        source->setCaptureSession(this);
        connect(source, &QPlatformVideoSource::activeChanged, this,
                &QFFmpegMediaCaptureSession::updatePrimaryActiveVideoSource);
        connect(source, &QObject::destroyed, this,
                &QFFmpegMediaCaptureSession::updatePrimaryActiveVideoSource, Qt::QueuedConnection);
    }

    updatePrimaryActiveVideoSource();

    return true;
}

QPlatformVideoSource *QFFmpegMediaCaptureSession::primaryActiveVideoSource()
{
    return m_primaryActiveVideoSource;
}

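// Collects the audio inputs currently set on the session: the device-based input
// and/or the application-provided audio buffer input.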
std::vector<QPlatformAudioBufferInputBase *> QFFmpegMediaCaptureSession::activeAudioInputs() const
{
    std::vector<QPlatformAudioBufferInputBase *> result;
    if (m_audioInput)
        result.push_back(m_audioInput);

    if (m_audioBufferInput)
        result.push_back(m_audioBufferInput);

    return result;
}

QT_END_NAMESPACE

#include "moc_qffmpegmediacapturesession_p.cpp"