1 | // Copyright (C) 2021 The Qt Company Ltd. |
---|---|
2 | // SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only |
3 | |
4 | #include "playbackengine/qffmpegaudiorenderer_p.h" |
5 | #include "qaudiosink.h" |
6 | #include "qaudiooutput.h" |
7 | #include "qaudiobufferoutput.h" |
8 | #include "private/qplatformaudiooutput_p.h" |
9 | #include <QtCore/qloggingcategory.h> |
10 | |
11 | #include "qffmpegresampler_p.h" |
12 | #include "qffmpegmediaformatinfo_p.h" |
13 | |
14 | QT_BEGIN_NAMESPACE |
15 | |
16 | static Q_LOGGING_CATEGORY(qLcAudioRenderer, "qt.multimedia.ffmpeg.audiorenderer"); |
17 | |
18 | namespace QFFmpeg { |
19 | |
20 | using namespace std::chrono_literals; |
21 | using namespace std::chrono; |
22 | |
23 | namespace { |
24 | constexpr auto DesiredBufferTime = 110000us; |
25 | constexpr auto MinDesiredBufferTime = 22000us; |
26 | constexpr auto MaxDesiredBufferTime = 64000us; |
27 | constexpr auto MinDesiredFreeBufferTime = 10000us; |
28 | |
29 | // It might be changed with #ifdef, as on Linux, QPulseAudioSink has quite unstable timings, |
30 | // and it needs much more time to make sure that the buffer is overloaded. |
31 | constexpr auto BufferLoadingMeasureTime = 400ms; |
32 | |
33 | constexpr auto DurationBias = 2ms; // avoids extra timer events |
34 | |
35 | qreal sampleRateFactor() { |
36 | // Test purposes: |
37 | // |
38 | // The env var describes a factor for the sample rate of |
39 | // audio data that we feed to the audio sink. |
40 | // |
41 | // In some cases audio sink might consume data slightly slower or faster than expected; |
42 | // even though the synchronization in the audio renderer is supposed to handle it, |
43 | // it makes sense to experiment with QT_MEDIA_PLAYER_AUDIO_SAMPLE_RATE_FACTOR != 1. |
44 | // |
45 | // Set QT_MEDIA_PLAYER_AUDIO_SAMPLE_RATE_FACTOR > 1 (e.g. 1.01 - 1.1) to test high buffer loading |
46 | // or try compensating too fast data consumption by the audio sink. |
47 | // Set QT_MEDIA_PLAYER_AUDIO_SAMPLE_RATE_FACTOR < 1 to test low buffer loading |
48 | // or try compensating too slow data consumption by the audio sink. |
49 | |
50 | |
51 | static const qreal result = []() { |
52 | const auto sampleRateFactorStr = qEnvironmentVariable(varName: "QT_MEDIA_PLAYER_AUDIO_SAMPLE_RATE_FACTOR"); |
53 | bool ok = false; |
54 | const auto result = sampleRateFactorStr.toDouble(ok: &ok); |
55 | return ok ? result : 1.; |
56 | }(); |
57 | |
58 | return result; |
59 | } |
60 | |
61 | QAudioFormat audioFormatFromFrame(const Frame &frame) |
62 | { |
63 | return QFFmpegMediaFormatInfo::audioFormatFromCodecParameters( |
64 | codecPar: *frame.codecContext()->stream()->codecpar); |
65 | } |
66 | |
67 | std::unique_ptr<QFFmpegResampler> createResampler(const Frame &frame, |
68 | const QAudioFormat &outputFormat) |
69 | { |
70 | return std::make_unique<QFFmpegResampler>(args: frame.codecContext(), args: outputFormat, |
71 | args: frame.startTime().get()); |
72 | } |
73 | |
74 | } // namespace |
75 | |
76 | AudioRenderer::AudioRenderer(const TimeController &tc, QAudioOutput *output, |
77 | QAudioBufferOutput *bufferOutput) |
78 | : Renderer(tc), m_output(output), m_bufferOutput(bufferOutput) |
79 | { |
80 | if (output) { |
81 | // TODO: implement the signals in QPlatformAudioOutput and connect to them, QTBUG-112294 |
82 | connect(sender: output, signal: &QAudioOutput::deviceChanged, context: this, slot: &AudioRenderer::onDeviceChanged); |
83 | connect(sender: output, signal: &QAudioOutput::volumeChanged, context: this, slot: &AudioRenderer::updateVolume); |
84 | connect(sender: output, signal: &QAudioOutput::mutedChanged, context: this, slot: &AudioRenderer::updateVolume); |
85 | } |
86 | } |
87 | |
88 | void AudioRenderer::setOutput(QAudioOutput *output) |
89 | { |
90 | setOutputInternal(actual&: m_output, desired: output, changeHandler: [this](QAudioOutput *) { onDeviceChanged(); }); |
91 | } |
92 | |
93 | void AudioRenderer::setOutput(QAudioBufferOutput *bufferOutput) |
94 | { |
95 | setOutputInternal(actual&: m_bufferOutput, desired: bufferOutput, |
96 | changeHandler: [this](QAudioBufferOutput *) { m_bufferOutputChanged = true; }); |
97 | } |
98 | |
AudioRenderer::~AudioRenderer()
{
    // Tear down the sink/io device and clear buffered state before destruction.
    freeOutput();
}
103 | |
104 | void AudioRenderer::updateVolume() |
105 | { |
106 | if (m_sink) |
107 | m_sink->setVolume(m_output->isMuted() ? 0.f : m_output->volume()); |
108 | } |
109 | |
void AudioRenderer::onDeviceChanged()
{
    // Deferred handling: updateOutputs() rebuilds the sink for the new device
    // on the next rendered frame.
    m_deviceChanged = true;
}
114 | |
115 | Renderer::RenderingResult AudioRenderer::renderInternal(Frame frame) |
116 | { |
117 | if (frame.isValid()) |
118 | updateOutputs(frame); |
119 | |
120 | // push to sink first in order not to waste time on resampling |
121 | // for QAudioBufferOutput |
122 | const RenderingResult result = pushFrameToOutput(frame); |
123 | |
124 | if (m_lastFramePushDone) |
125 | pushFrameToBufferOutput(frame); |
126 | // else // skip pushing the same data to QAudioBufferOutput |
127 | |
128 | m_lastFramePushDone = result.done; |
129 | |
130 | return result; |
131 | } |
132 | |
133 | AudioRenderer::RenderingResult AudioRenderer::pushFrameToOutput(const Frame &frame) |
134 | { |
135 | if (!m_ioDevice || !m_resampler) |
136 | return {}; |
137 | |
138 | Q_ASSERT(m_sink); |
139 | |
140 | auto firstFrameFlagGuard = qScopeGuard(f: [&]() { m_firstFrameToSink = false; }); |
141 | |
142 | const SynchronizationStamp syncStamp{ .audioSinkState: m_sink->state(), .audioSinkBytesFree: m_sink->bytesFree(), |
143 | .bufferBytesWritten: m_bufferedData.offset, .timePoint: RealClock::now() }; |
144 | |
145 | if (!m_bufferedData.isValid()) { |
146 | if (!frame.isValid()) { |
147 | if (std::exchange(obj&: m_drained, new_val: true)) |
148 | return {}; |
149 | |
150 | const auto time = bufferLoadingTime(syncStamp); |
151 | |
152 | qCDebug(qLcAudioRenderer) << "Draining AudioRenderer, time:"<< time; |
153 | |
154 | return { .done: time.count() == 0, .recheckInterval: time }; |
155 | } |
156 | |
157 | m_bufferedData = { .buffer: m_resampler->resample(frame: frame.avFrame()) }; |
158 | } |
159 | |
160 | if (m_bufferedData.isValid()) { |
161 | // synchronize after "QIODevice::write" to deliver audio data to the sink ASAP. |
162 | auto syncGuard = qScopeGuard(f: [&]() { updateSynchronization(stamp: syncStamp, frame); }); |
163 | |
164 | const auto bytesWritten = m_ioDevice->write(data: m_bufferedData.data(), len: m_bufferedData.size()); |
165 | |
166 | m_bufferedData.offset += bytesWritten; |
167 | |
168 | if (m_bufferedData.size() <= 0) { |
169 | m_bufferedData = {}; |
170 | |
171 | return {}; |
172 | } |
173 | |
174 | const auto remainingDuration = durationForBytes(bytes: m_bufferedData.size()); |
175 | |
176 | return { .done: false, |
177 | .recheckInterval: std::min(a: remainingDuration + DurationBias, b: m_timings.actualBufferDuration / 2) }; |
178 | } |
179 | |
180 | return {}; |
181 | } |
182 | |
183 | void AudioRenderer::pushFrameToBufferOutput(const Frame &frame) |
184 | { |
185 | if (!m_bufferOutput) |
186 | return; |
187 | |
188 | if (frame.isValid()) { |
189 | Q_ASSERT(m_bufferOutputResampler); |
190 | |
191 | // TODO: get buffer from m_bufferedData if resample formats are equal |
192 | QAudioBuffer buffer = m_bufferOutputResampler->resample(frame: frame.avFrame()); |
193 | emit m_bufferOutput->audioBufferReceived(buffer); |
194 | } else { |
195 | emit m_bufferOutput->audioBufferReceived(buffer: {}); |
196 | } |
197 | } |
198 | |
void AudioRenderer::onPlaybackRateChanged()
{
    // Drop the sink resampler; updateOutputs() recreates it on the next frame
    // with a sample rate adjusted to the new playback rate (see initResampler).
    m_resampler.reset();
}
203 | |
204 | std::chrono::milliseconds AudioRenderer::timerInterval() const |
205 | { |
206 | constexpr auto MaxFixableInterval = 50ms; |
207 | |
208 | const auto interval = Renderer::timerInterval(); |
209 | |
210 | if (m_firstFrameToSink || !m_sink || m_sink->state() != QAudio::IdleState |
211 | || interval > MaxFixableInterval) |
212 | return interval; |
213 | |
214 | return 0ms; |
215 | } |
216 | |
void AudioRenderer::onPauseChanged()
{
    // Treat resume like a fresh start so the sink-idle shortcuts in
    // timerInterval()/onAudioSinkStateChanged() don't fire spuriously.
    m_firstFrameToSink = true;
    Renderer::onPauseChanged();
}
222 | |
223 | void AudioRenderer::initResampler(const Frame &frame) |
224 | { |
225 | // We recreate resampler whenever format is changed |
226 | |
227 | auto resamplerFormat = m_sinkFormat; |
228 | resamplerFormat.setSampleRate( |
229 | qRound(d: m_sinkFormat.sampleRate() / playbackRate() * sampleRateFactor())); |
230 | m_resampler = createResampler(frame, outputFormat: resamplerFormat); |
231 | } |
232 | |
// Releases the audio sink and resets all sink-related state so a subsequent
// updateOutputs() starts from scratch (used on device change and destruction).
void AudioRenderer::freeOutput()
{
    qCDebug(qLcAudioRenderer) << "Free audio output";
    if (m_sink) {
        // Stop playback and discard pending data before destroying the sink.
        m_sink->reset();

        // TODO: investigate if it's enough to reset the sink without deleting
        m_sink.reset();
    }

    // The io device was owned by the sink; it is gone with it.
    m_ioDevice = nullptr;

    m_bufferedData = {};
    m_deviceChanged = false;
    m_sinkFormat = {};
    m_timings = {};
    m_bufferLoadingInfo = {};
}
251 | |
252 | void AudioRenderer::updateOutputs(const Frame &frame) |
253 | { |
254 | if (m_deviceChanged) { |
255 | freeOutput(); |
256 | m_resampler.reset(); |
257 | } |
258 | |
259 | if (m_bufferOutput) { |
260 | if (m_bufferOutputChanged) { |
261 | m_bufferOutputChanged = false; |
262 | m_bufferOutputResampler.reset(); |
263 | } |
264 | |
265 | if (!m_bufferOutputResampler) { |
266 | QAudioFormat outputFormat = m_bufferOutput->format(); |
267 | if (!outputFormat.isValid()) |
268 | outputFormat = audioFormatFromFrame(frame); |
269 | m_bufferOutputResampler = createResampler(frame, outputFormat); |
270 | } |
271 | } |
272 | |
273 | if (!m_output) |
274 | return; |
275 | |
276 | if (!m_sinkFormat.isValid()) { |
277 | m_sinkFormat = audioFormatFromFrame(frame); |
278 | m_sinkFormat.setChannelConfig(m_output->device().channelConfiguration()); |
279 | } |
280 | |
281 | if (!m_sink) { |
282 | // Insert a delay here to test time offset synchronization, e.g. QThread::sleep(1) |
283 | m_sink = std::make_unique<QAudioSink>(args: m_output->device(), args&: m_sinkFormat); |
284 | updateVolume(); |
285 | m_sink->setBufferSize(m_sinkFormat.bytesForDuration(microseconds: DesiredBufferTime.count())); |
286 | m_ioDevice = m_sink->start(); |
287 | m_firstFrameToSink = true; |
288 | |
289 | connect(sender: m_sink.get(), signal: &QAudioSink::stateChanged, context: this, |
290 | slot: &AudioRenderer::onAudioSinkStateChanged); |
291 | |
292 | m_timings.actualBufferDuration = durationForBytes(bytes: m_sink->bufferSize()); |
293 | m_timings.maxSoundDelay = qMin(a: MaxDesiredBufferTime, |
294 | b: m_timings.actualBufferDuration - MinDesiredFreeBufferTime); |
295 | m_timings.minSoundDelay = MinDesiredBufferTime; |
296 | |
297 | Q_ASSERT(DurationBias < m_timings.minSoundDelay |
298 | && m_timings.maxSoundDelay < m_timings.actualBufferDuration); |
299 | } |
300 | |
301 | if (!m_resampler) |
302 | initResampler(frame); |
303 | } |
304 | |
305 | void AudioRenderer::updateSynchronization(const SynchronizationStamp &stamp, const Frame &frame) |
306 | { |
307 | if (!frame.isValid()) |
308 | return; |
309 | |
310 | Q_ASSERT(m_sink); |
311 | |
312 | const auto bufferLoadingTime = this->bufferLoadingTime(syncStamp: stamp); |
313 | const auto currentFrameDelay = frameDelay(frame, timePoint: stamp.timePoint); |
314 | const auto writtenTime = durationForBytes(bytes: stamp.bufferBytesWritten); |
315 | const auto soundDelay = currentFrameDelay + bufferLoadingTime - writtenTime; |
316 | |
317 | auto synchronize = [&](microseconds fixedDelay, microseconds targetSoundDelay) { |
318 | // TODO: investigate if we need sample compensation here |
319 | |
320 | changeRendererTime(offset: fixedDelay - targetSoundDelay); |
321 | if (qLcAudioRenderer().isDebugEnabled()) { |
322 | // clang-format off |
323 | qCDebug(qLcAudioRenderer) |
324 | << "Change rendering time:" |
325 | << "\n First frame:"<< m_firstFrameToSink |
326 | << "\n Delay (frame+buffer-written):"<< currentFrameDelay << "+" |
327 | << bufferLoadingTime << "-" |
328 | << writtenTime << "=" |
329 | << soundDelay |
330 | << "\n Fixed delay:"<< fixedDelay |
331 | << "\n Target delay:"<< targetSoundDelay |
332 | << "\n Buffer durations (min/max/limit):"<< m_timings.minSoundDelay |
333 | << m_timings.maxSoundDelay |
334 | << m_timings.actualBufferDuration |
335 | << "\n Audio sink state:"<< stamp.audioSinkState; |
336 | // clang-format on |
337 | } |
338 | }; |
339 | |
340 | const auto loadingType = soundDelay > m_timings.maxSoundDelay ? BufferLoadingInfo::High |
341 | : soundDelay < m_timings.minSoundDelay ? BufferLoadingInfo::Low |
342 | : BufferLoadingInfo::Moderate; |
343 | |
344 | if (loadingType != m_bufferLoadingInfo.type) { |
345 | // qCDebug(qLcAudioRenderer) << "Change buffer loading type:" << |
346 | // m_bufferLoadingInfo.type |
347 | // << "->" << loadingType << "soundDelay:" << soundDelay; |
348 | m_bufferLoadingInfo = { .type: loadingType, .timePoint: stamp.timePoint, .delay: soundDelay }; |
349 | } |
350 | |
351 | if (loadingType != BufferLoadingInfo::Moderate) { |
352 | const auto isHigh = loadingType == BufferLoadingInfo::High; |
353 | const auto shouldHandleIdle = stamp.audioSinkState == QAudio::IdleState && !isHigh; |
354 | |
355 | auto &fixedDelay = m_bufferLoadingInfo.delay; |
356 | |
357 | fixedDelay = shouldHandleIdle ? soundDelay |
358 | : isHigh ? qMin(a: soundDelay, b: fixedDelay) |
359 | : qMax(a: soundDelay, b: fixedDelay); |
360 | |
361 | if (stamp.timePoint - m_bufferLoadingInfo.timePoint > BufferLoadingMeasureTime |
362 | || (m_firstFrameToSink && isHigh) || shouldHandleIdle) { |
363 | const auto targetDelay = isHigh |
364 | ? (m_timings.maxSoundDelay + m_timings.minSoundDelay) / 2 |
365 | : m_timings.minSoundDelay + DurationBias; |
366 | |
367 | synchronize(fixedDelay, targetDelay); |
368 | m_bufferLoadingInfo = { .type: BufferLoadingInfo::Moderate, .timePoint: stamp.timePoint, .delay: targetDelay }; |
369 | } |
370 | } |
371 | } |
372 | |
373 | microseconds AudioRenderer::bufferLoadingTime(const SynchronizationStamp &syncStamp) const |
374 | { |
375 | Q_ASSERT(m_sink); |
376 | |
377 | if (syncStamp.audioSinkState == QAudio::IdleState) |
378 | return microseconds(0); |
379 | |
380 | const auto bytes = qMax(a: m_sink->bufferSize() - syncStamp.audioSinkBytesFree, b: 0); |
381 | |
382 | #ifdef Q_OS_ANDROID |
383 | // The hack has been added due to QAndroidAudioSink issues (QTBUG-118609). |
384 | // The method QAndroidAudioSink::bytesFree returns 0 or bufferSize, intermediate values are not |
385 | // available now; to be fixed. |
386 | if (bytes == 0) |
387 | return m_timings.minSoundDelay + MinDesiredBufferTime; |
388 | #endif |
389 | |
390 | return durationForBytes(bytes); |
391 | } |
392 | |
393 | void AudioRenderer::onAudioSinkStateChanged(QAudio::State state) |
394 | { |
395 | if (state == QAudio::IdleState && !m_firstFrameToSink && !m_deviceChanged) |
396 | scheduleNextStep(); |
397 | } |
398 | |
399 | microseconds AudioRenderer::durationForBytes(qsizetype bytes) const |
400 | { |
401 | return microseconds(m_sinkFormat.durationForBytes(byteCount: static_cast<qint32>(bytes))); |
402 | } |
403 | |
404 | } // namespace QFFmpeg |
405 | |
406 | QT_END_NAMESPACE |
407 | |
408 | #include "moc_qffmpegaudiorenderer_p.cpp" |
409 |
Definitions
- qLcAudioRenderer
- DesiredBufferTime
- MinDesiredBufferTime
- MaxDesiredBufferTime
- MinDesiredFreeBufferTime
- BufferLoadingMeasureTime
- DurationBias
- sampleRateFactor
- audioFormatFromFrame
- createResampler
- AudioRenderer
- setOutput
- setOutput
- ~AudioRenderer
- updateVolume
- onDeviceChanged
- renderInternal
- pushFrameToOutput
- pushFrameToBufferOutput
- onPlaybackRateChanged
- timerInterval
- onPauseChanged
- initResampler
- freeOutput
- updateOutputs
- updateSynchronization
- bufferLoadingTime
- onAudioSinkStateChanged
Learn to use CMake with our Intro Training
Find out more