// Copyright (C) 2021 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#include "playbackengine/qffmpegaudiorenderer_p.h"

#include <QtMultimedia/qaudiosink.h>
#include <QtMultimedia/qaudiooutput.h>
#include <QtMultimedia/qaudiobufferoutput.h>
#include <QtMultimedia/private/qaudiobuffer_support_p.h>
#include <QtMultimedia/private/qplatformaudiooutput_p.h>

#include <QtCore/qloggingcategory.h>

#include "qffmpegaudioframeconverter_p.h"
#include "qffmpegmediaformatinfo_p.h"
#include "qffmpegresampler_p.h"

QT_BEGIN_NAMESPACE

Q_STATIC_LOGGING_CATEGORY(qLcAudioRenderer, "qt.multimedia.ffmpeg.audiorenderer");

namespace QFFmpeg {

using namespace std::chrono_literals;
using namespace std::chrono;

namespace {
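// How the constants below are used (see updateOutputs() and updateSynchronization()):
// DesiredBufferTime drives the requested QAudioSink buffer size; MinDesiredBufferTime becomes the
// lower bound of the tolerated sound delay, and the upper bound is MaxDesiredBufferTime, clamped
// so that at least MinDesiredFreeBufferTime of the actual sink buffer stays writable.
// Illustrative only: assuming a 48 kHz, 16-bit stereo sink format (192000 bytes/s), the 110 ms
// DesiredBufferTime corresponds to a requested buffer of roughly 21 kB.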
constexpr auto DesiredBufferTime = 110000us;
constexpr auto MinDesiredBufferTime = 22000us;
constexpr auto MaxDesiredBufferTime = 64000us;
constexpr auto MinDesiredFreeBufferTime = 10000us;

// This value may need to be adjusted with an #ifdef: on Linux, QPulseAudioSink has quite unstable
// timings and needs much more time to confirm that the buffer is overloaded.
constexpr auto BufferLoadingMeasureTime = 400ms;

constexpr auto DurationBias = 2ms; // avoids extra timer events

QAudioFormat audioFormatFromFrame(const Frame &frame)
{
    return QFFmpegMediaFormatInfo::audioFormatFromCodecParameters(
            *frame.codecContext()->stream()->codecpar);
}

} // namespace

AudioRenderer::AudioRenderer(const TimeController &tc, QAudioOutput *output,
                             QAudioBufferOutput *bufferOutput, bool pitchCompensation)
    : Renderer(tc),
      m_output(output),
      m_bufferOutput(bufferOutput),
      m_pitchCompensation(pitchCompensation)
{
    if (output) {
        // TODO: implement the signals in QPlatformAudioOutput and connect to them, QTBUG-112294
        connect(output, &QAudioOutput::deviceChanged, this, &AudioRenderer::onDeviceChanged);
        connect(output, &QAudioOutput::volumeChanged, this, &AudioRenderer::updateVolume);
        connect(output, &QAudioOutput::mutedChanged, this, &AudioRenderer::updateVolume);
    }
}

void AudioRenderer::setOutput(QAudioOutput *output)
{
    setOutputInternal(m_output, output, [this](QAudioOutput *) { onDeviceChanged(); });
}

void AudioRenderer::setOutput(QAudioBufferOutput *bufferOutput)
{
    setOutputInternal(m_bufferOutput, bufferOutput,
                      [this](QAudioBufferOutput *) { m_bufferOutputChanged = true; });
}

void AudioRenderer::setPitchCompensation(bool enabled)
{
    QMetaObject::invokeMethod(this, [this, enabled] {
        if (m_pitchCompensation == enabled)
            return;

        m_pitchCompensation = enabled;
        m_audioFrameConverter.reset();
    });
}

AudioRenderer::~AudioRenderer()
{
    freeOutput();
}

void AudioRenderer::updateVolume()
{
    if (m_sink)
        m_sink->setVolume(m_output->isMuted() ? 0.f : m_output->volume());
}

void AudioRenderer::onDeviceChanged()
{
    m_deviceChanged = true;
}

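// Renders one frame: the data is pushed to the sink first (so that resampling for the optional
// QAudioBufferOutput does not delay playback), and is forwarded to the QAudioBufferOutput only
// once the previous frame has been pushed completely, to avoid emitting the same data twice.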
Renderer::RenderingResult AudioRenderer::renderInternal(Frame frame)
{
    if (frame.isValid())
        updateOutputs(frame);

    // push to sink first in order not to waste time on resampling
    // for QAudioBufferOutput
    const RenderingResult result = pushFrameToOutput(frame);

    if (m_lastFramePushDone)
        pushFrameToBufferOutput(frame);
    // else // skip pushing the same data to QAudioBufferOutput

    m_lastFramePushDone = result.done;

    return result;
}

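// Writes the converted audio data to the sink's QIODevice. Data that does not fit into the sink
// buffer is kept in m_bufferedData and retried on the next step; an invalid frame triggers
// draining. The result reports whether the frame has been fully pushed and when to recheck.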
AudioRenderer::RenderingResult AudioRenderer::pushFrameToOutput(const Frame &frame)
{
    if (!m_ioDevice || !m_audioFrameConverter)
        return {};

    Q_ASSERT(m_sink);

    auto firstFrameFlagGuard = qScopeGuard([&]() { m_firstFrameToSink = false; });

    const SynchronizationStamp syncStamp{ m_sink->state(), m_sink->bytesFree(),
                                          m_bufferedData.offset, SteadyClock::now() };

    if (!m_bufferedData.isValid()) {
        if (!frame.isValid()) {
            if (std::exchange(m_drained, true))
                return {};

            const auto time = bufferLoadingTime(syncStamp);

            qCDebug(qLcAudioRenderer) << "Draining AudioRenderer, time:" << time;

            return { time.count() == 0, time };
        }

        m_bufferedData = {
            m_audioFrameConverter->convert(frame.avFrame()),
        };
    }

    if (m_bufferedData.isValid()) {
        // synchronize after "QIODevice::write" to deliver audio data to the sink ASAP.
        auto syncGuard = qScopeGuard([&]() { updateSynchronization(syncStamp, frame); });

        const auto bytesWritten = m_ioDevice->write(m_bufferedData.data(), m_bufferedData.size());

        m_bufferedData.offset += bytesWritten;

        if (m_bufferedData.size() <= 0) {
            m_bufferedData = {};

            return {};
        }

        const auto remainingDuration = durationForBytes(m_bufferedData.size());

        return { false,
                 std::min(remainingDuration + DurationBias, m_timings.actualBufferDuration / 2) };
    }

    return {};
}

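// Forwards the frame, resampled to the output format, to the application-facing
// QAudioBufferOutput; an invalid frame signals the end of the stream with an empty buffer.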
void AudioRenderer::pushFrameToBufferOutput(const Frame &frame)
{
    if (!m_bufferOutput)
        return;

    if (frame.isValid()) {
        Q_ASSERT(m_bufferOutputResampler);

        // TODO: get buffer from m_bufferedData if resample formats are equal
        QAudioBuffer buffer = m_bufferOutputResampler->resample(frame.avFrame());
        emit m_bufferOutput->audioBufferReceived(buffer);
    } else {
        emit m_bufferOutput->audioBufferReceived({});
    }
}

void AudioRenderer::onPlaybackRateChanged()
{
    m_audioFrameConverter.reset();
}

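// If the sink has gone idle (its buffer has drained) and the next frame is due soon, return a
// zero interval so new data is pushed immediately instead of waiting for the regular timer.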
std::chrono::milliseconds AudioRenderer::timerInterval() const
{
    constexpr auto MaxFixableInterval = 50ms;

    const auto interval = Renderer::timerInterval();

    if (m_firstFrameToSink || !m_sink || m_sink->state() != QAudio::IdleState
        || interval > MaxFixableInterval)
        return interval;

    return 0ms;
}

void AudioRenderer::onPauseChanged()
{
    m_firstFrameToSink = true;
    Renderer::onPauseChanged();
}

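// Chooses between the trivial converter (no pitch compensation requested, or playback at normal
// speed) and the pitch-shifting converter that compensates the pitch change caused by a
// non-unit playback rate.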
void AudioRenderer::initAudioFrameConverter(const Frame &frame)
{
    // We recreate the frame converter whenever the format or the playback rate changes
    if (!m_pitchCompensation || qFuzzyCompare(playbackRate(), 1.0f)) {
        m_audioFrameConverter = makeTrivialAudioFrameConverter(frame, m_sinkFormat, playbackRate());
    } else {
        m_audioFrameConverter =
                makePitchShiftingAudioFrameConverter(frame, m_sinkFormat, playbackRate());
    }
}

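// Tears down the audio sink and resets all sink-related state; called when the output device
// changes and from the destructor.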
void AudioRenderer::freeOutput()
{
    qCDebug(qLcAudioRenderer) << "Free audio output";
    if (m_sink) {
        m_sink->reset();

        // TODO: investigate if it's enough to reset the sink without deleting
        m_sink.reset();
    }

    m_ioDevice = nullptr;

    m_bufferedData = {};
    m_deviceChanged = false;
    m_sinkFormat = {};
    m_timings = {};
    m_bufferLoadingInfo = {};
}

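// Lazily (re)creates the output chain for the current frame: the resampler for the
// QAudioBufferOutput, the QAudioSink (including its buffer size and the derived delay timings),
// and the audio frame converter. A pending device change frees the old sink first.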
void AudioRenderer::updateOutputs(const Frame &frame)
{
    if (m_deviceChanged) {
        freeOutput();
        m_audioFrameConverter.reset();
    }

    if (m_bufferOutput) {
        if (m_bufferOutputChanged) {
            m_bufferOutputChanged = false;
            m_bufferOutputResampler.reset();
        }

        if (!m_bufferOutputResampler) {
            QAudioFormat outputFormat = m_bufferOutput->format();
            if (!outputFormat.isValid())
                outputFormat = audioFormatFromFrame(frame);
            m_bufferOutputResampler = createResampler(frame, outputFormat);
        }
    }

    if (!m_output)
        return;

    if (!m_sinkFormat.isValid()) {
        m_sinkFormat = audioFormatFromFrame(frame);
        m_sinkFormat.setChannelConfig(m_output->device().channelConfiguration());
    }

    if (!m_sink) {
        // Insert a delay here to test time offset synchronization, e.g. QThread::sleep(1)
        m_sink = std::make_unique<QAudioSink>(m_output->device(), m_sinkFormat);
        updateVolume();
        m_sink->setBufferSize(m_sinkFormat.bytesForDuration(DesiredBufferTime.count()));
        m_ioDevice = m_sink->start();
        m_firstFrameToSink = true;

        connect(m_sink.get(), &QAudioSink::stateChanged, this,
                &AudioRenderer::onAudioSinkStateChanged);

        m_timings.actualBufferDuration = durationForBytes(m_sink->bufferSize());
        m_timings.maxSoundDelay = qMin(MaxDesiredBufferTime,
                                       m_timings.actualBufferDuration - MinDesiredFreeBufferTime);
        m_timings.minSoundDelay = MinDesiredBufferTime;

        Q_ASSERT(DurationBias < m_timings.minSoundDelay
                 && m_timings.maxSoundDelay < m_timings.actualBufferDuration);
    }

    if (!m_audioFrameConverter)
        initAudioFrameConverter(frame);
}

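// Keeps audio playback in sync with the rendering clock. The effective sound delay is
// frameDelay + bufferLoadingTime - writtenTime; it is classified as Low, Moderate, or High
// against minSoundDelay/maxSoundDelay, and once a Low or High condition persists for
// BufferLoadingMeasureTime (or immediately for the first frame or an idle sink), the renderer
// time is shifted so the delay settles back into the target range.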
void AudioRenderer::updateSynchronization(const SynchronizationStamp &stamp, const Frame &frame)
{
    if (!frame.isValid())
        return;

    Q_ASSERT(m_sink);

    const auto bufferLoadingTime = this->bufferLoadingTime(stamp);
    const auto currentFrameDelay = frameDelay(frame, stamp.timePoint);
    const auto writtenTime = durationForBytes(stamp.bufferBytesWritten);
    const auto soundDelay = currentFrameDelay + bufferLoadingTime - writtenTime;

    auto synchronize = [&](microseconds fixedDelay, microseconds targetSoundDelay) {
        // TODO: investigate if we need sample compensation here

        changeRendererTime(fixedDelay - targetSoundDelay);
        if (qLcAudioRenderer().isDebugEnabled()) {
            // clang-format off
            qCDebug(qLcAudioRenderer)
                    << "Change rendering time:"
                    << "\n  First frame:" << m_firstFrameToSink
                    << "\n  Delay (frame+buffer-written):" << currentFrameDelay << "+"
                                                           << bufferLoadingTime << "-"
                                                           << writtenTime << "="
                                                           << soundDelay
                    << "\n  Fixed delay:" << fixedDelay
                    << "\n  Target delay:" << targetSoundDelay
                    << "\n  Buffer durations (min/max/limit):" << m_timings.minSoundDelay
                                                               << m_timings.maxSoundDelay
                                                               << m_timings.actualBufferDuration
                    << "\n  Audio sink state:" << stamp.audioSinkState;
            // clang-format on
        }
    };

    const auto loadingType = soundDelay > m_timings.maxSoundDelay ? BufferLoadingInfo::High
                           : soundDelay < m_timings.minSoundDelay ? BufferLoadingInfo::Low
                                                                  : BufferLoadingInfo::Moderate;

    if (loadingType != m_bufferLoadingInfo.type) {
        // qCDebug(qLcAudioRenderer) << "Change buffer loading type:" << m_bufferLoadingInfo.type
        //                           << "->" << loadingType << "soundDelay:" << soundDelay;
        m_bufferLoadingInfo = { loadingType, stamp.timePoint, soundDelay };
    }

    if (loadingType != BufferLoadingInfo::Moderate) {
        const auto isHigh = loadingType == BufferLoadingInfo::High;
        const auto shouldHandleIdle = stamp.audioSinkState == QAudio::IdleState && !isHigh;

        auto &fixedDelay = m_bufferLoadingInfo.delay;

        fixedDelay = shouldHandleIdle ? soundDelay
                   : isHigh           ? qMin(soundDelay, fixedDelay)
                                      : qMax(soundDelay, fixedDelay);

        if (stamp.timePoint - m_bufferLoadingInfo.timePoint > BufferLoadingMeasureTime
            || (m_firstFrameToSink && isHigh) || shouldHandleIdle) {
            const auto targetDelay = isHigh
                    ? (m_timings.maxSoundDelay + m_timings.minSoundDelay) / 2
                    : m_timings.minSoundDelay + DurationBias;

            synchronize(fixedDelay, targetDelay);
            m_bufferLoadingInfo = { BufferLoadingInfo::Moderate, stamp.timePoint, targetDelay };
        }
    }
}

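// Returns the playback duration of the audio currently queued in the sink buffer
// (bufferSize - bytesFree at the time of the stamp), or zero when the sink is idle.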
microseconds AudioRenderer::bufferLoadingTime(const SynchronizationStamp &syncStamp) const
{
    Q_ASSERT(m_sink);

    if (syncStamp.audioSinkState == QAudio::IdleState)
        return microseconds(0);

    const auto bytes = qMax(m_sink->bufferSize() - syncStamp.audioSinkBytesFree, 0);

#ifdef Q_OS_ANDROID
    // This hack has been added due to QAndroidAudioSink issues (QTBUG-118609):
    // QAndroidAudioSink::bytesFree returns either 0 or bufferSize; intermediate values are not
    // available yet. To be fixed.
    if (bytes == 0)
        return m_timings.minSoundDelay + MinDesiredBufferTime;
#endif

    return durationForBytes(bytes);
}

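// An idle sink means its buffer has underrun; schedule the next rendering step right away so new
// data reaches the sink as soon as possible, unless a device change or the first frame is pending.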
void AudioRenderer::onAudioSinkStateChanged(QAudio::State state)
{
    if (state == QAudio::IdleState && !m_firstFrameToSink && !m_deviceChanged)
        scheduleNextStep();
}

microseconds AudioRenderer::durationForBytes(qsizetype bytes) const
{
    return microseconds(m_sinkFormat.durationForBytes(static_cast<qint32>(bytes)));
}

} // namespace QFFmpeg

QT_END_NAMESPACE

#include "moc_qffmpegaudiorenderer_p.cpp"