// Copyright (C) 2021 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
3 | |
#include "playbackengine/qffmpegdemuxer_p.h"

#include <qloggingcategory.h>

#include <algorithm>
#include <chrono>
#include <utility>
7 | |
8 | QT_BEGIN_NAMESPACE |
9 | |
10 | namespace QFFmpeg { |
11 | |
12 | // 4 sec for buffering. TODO: maybe move to env var customization |
13 | static constexpr TrackDuration MaxBufferedDurationUs{ 4'000'000 }; |
14 | |
15 | // around 4 sec of hdr video |
16 | static constexpr qint64 MaxBufferedSize = 32 * 1024 * 1024; |
17 | |
18 | static Q_LOGGING_CATEGORY(qLcDemuxer, "qt.multimedia.ffmpeg.demuxer" ); |
19 | |
20 | static TrackPosition packetEndPos(const Packet &packet, const AVStream *stream, |
21 | const AVFormatContext *context) |
22 | { |
23 | const AVPacket &avPacket = *packet.avPacket(); |
24 | return packet.loopOffset().loopStartTimeUs.asDuration() |
25 | + toTrackPosition(streamPosition: AVStreamPosition(avPacket.pts + avPacket.duration), avStream: stream, formatContext: context); |
26 | } |
27 | |
28 | static bool isPacketWithinStreamDuration(const AVFormatContext *context, const Packet &packet) |
29 | { |
30 | const AVPacket &avPacket = *packet.avPacket(); |
31 | const AVStream &avStream = *context->streams[avPacket.stream_index]; |
32 | const AVStreamDuration streamDuration(avStream.duration); |
33 | if (streamDuration.get() <= 0 |
34 | || context->duration_estimation_method != AVFMT_DURATION_FROM_STREAM) |
35 | return true; // Stream duration shouldn't or doesn't need to be compared to pts |
36 | |
37 | if (avStream.start_time != AV_NOPTS_VALUE) |
38 | return AVStreamDuration(avPacket.pts - avStream.start_time) <= streamDuration; |
39 | |
40 | const TrackPosition trackPos = toTrackPosition(streamPosition: AVStreamPosition(avPacket.pts), avStream: &avStream, formatContext: context); |
41 | const TrackPosition trackPosOfStreamEnd = toTrackDuration(streamDuration, avStream: &avStream).asTimePoint(); |
42 | return trackPos <= trackPosOfStreamEnd; |
43 | |
44 | // TODO: If there is a packet that starts before the canonical end of the stream but has a |
45 | // malformed duration, rework doNextStep to check for eof after that packet. |
46 | } |
47 | |
48 | Demuxer::Demuxer(AVFormatContext *context, TrackPosition initialPosUs, bool seekPending, |
49 | const LoopOffset &loopOffset, const StreamIndexes &streamIndexes, int loops) |
50 | : m_context(context), |
51 | m_seeked(!seekPending && initialPosUs == TrackPosition{ 0 }), // Don't seek to 0 unless seek requested |
52 | m_posInLoopUs{ initialPosUs }, |
53 | m_loopOffset(loopOffset), |
54 | m_loops(loops) |
55 | { |
56 | qCDebug(qLcDemuxer) << "Create demuxer." |
57 | << "pos:" << m_posInLoopUs.get() |
58 | << "loop offset:" << m_loopOffset.loopStartTimeUs.get() |
59 | << "loop index:" << m_loopOffset.loopIndex << "loops:" << loops; |
60 | |
61 | Q_ASSERT(m_context); |
62 | |
63 | for (auto i = 0; i < QPlatformMediaPlayer::NTrackTypes; ++i) { |
64 | if (streamIndexes[i] >= 0) { |
65 | const auto trackType = static_cast<QPlatformMediaPlayer::TrackType>(i); |
66 | qCDebug(qLcDemuxer) << "Activate demuxing stream" << i << ", trackType:" << trackType; |
67 | m_streams[streamIndexes[i]] = { .trackType: trackType }; |
68 | } |
69 | } |
70 | } |
71 | |
72 | void Demuxer::doNextStep() |
73 | { |
74 | ensureSeeked(); |
75 | |
76 | Packet packet(m_loopOffset, AVPacketUPtr{ av_packet_alloc() }, id()); |
77 | |
78 | const int demuxStatus = av_read_frame(s: m_context, pkt: packet.avPacket()); |
79 | |
80 | if (demuxStatus == AVERROR_EOF || !isPacketWithinStreamDuration(context: m_context, packet)) { |
81 | ++m_loopOffset.loopIndex; |
82 | |
83 | const auto loops = m_loops.loadAcquire(); |
84 | if (loops >= 0 && m_loopOffset.loopIndex >= loops) { |
85 | qCDebug(qLcDemuxer) << "finish demuxing" ; |
86 | |
87 | if (!std::exchange(obj&: m_buffered, new_val: true)) |
88 | emit packetsBuffered(); |
89 | |
90 | setAtEnd(true); |
91 | } else { |
92 | // start next loop |
93 | m_seeked = false; |
94 | m_posInLoopUs = TrackPosition(0); |
95 | m_loopOffset.loopStartTimeUs = m_maxPacketsEndPos; |
96 | m_maxPacketsEndPos = TrackPosition(0); |
97 | |
98 | ensureSeeked(); |
99 | |
100 | qCDebug(qLcDemuxer) << "Demuxer loops changed. Index:" << m_loopOffset.loopIndex |
101 | << "Offset:" << m_loopOffset.loopStartTimeUs.get(); |
102 | |
103 | scheduleNextStep(allowDoImmediatelly: false); |
104 | } |
105 | |
106 | return; |
107 | } |
108 | |
109 | if (demuxStatus < 0) { |
110 | qCWarning(qLcDemuxer) << "Demuxing failed" << demuxStatus << err2str(errnum: demuxStatus); |
111 | |
112 | if (demuxStatus == AVERROR(EAGAIN) && m_demuxerRetryCount != s_maxDemuxerRetries) { |
113 | // When demuxer reports EAGAIN, we can try to recover by calling av_read_frame again. |
114 | // The documentation for av_read_frame does not mention this, but FFmpeg command line |
115 | // tool does this, see input_thread() function in ffmpeg_demux.c. There, the response |
116 | // is to sleep for 10 ms before trying again. NOTE: We do not have any known way of |
117 | // reproducing this in our tests. |
118 | ++m_demuxerRetryCount; |
119 | |
120 | qCDebug(qLcDemuxer) << "Retrying" ; |
121 | scheduleNextStep(allowDoImmediatelly: false); |
122 | } else { |
123 | // av_read_frame reports another error. This could for example happen if network is |
124 | // disconnected while playing a network stream, where av_read_frame may return |
125 | // ETIMEDOUT. |
126 | // TODO: Demuxer errors should likely stop playback in media player examples. |
127 | emit error(code: QMediaPlayer::ResourceError, |
128 | errorString: QLatin1StringView("Demuxing failed" )); |
129 | } |
130 | |
131 | return; |
132 | } |
133 | |
134 | m_demuxerRetryCount = 0; |
135 | |
136 | auto &avPacket = *packet.avPacket(); |
137 | |
138 | const auto streamIndex = avPacket.stream_index; |
139 | const auto stream = m_context->streams[streamIndex]; |
140 | |
141 | auto it = m_streams.find(x: streamIndex); |
142 | if (it != m_streams.end()) { |
143 | auto &streamData = it->second; |
144 | |
145 | const TrackPosition endPos = packetEndPos(packet, stream, context: m_context); |
146 | m_maxPacketsEndPos = qMax(a: m_maxPacketsEndPos, b: endPos); |
147 | |
148 | // Increase buffered metrics as the packet has been processed. |
149 | |
150 | streamData.bufferedDuration += toTrackDuration(streamDuration: AVStreamDuration(avPacket.duration), avStream: stream); |
151 | streamData.bufferedSize += avPacket.size; |
152 | streamData.maxSentPacketsPos = qMax(a: streamData.maxSentPacketsPos, b: endPos); |
153 | updateStreamDataLimitFlag(streamData); |
154 | |
155 | if (!m_buffered && streamData.isDataLimitReached) { |
156 | m_buffered = true; |
157 | emit packetsBuffered(); |
158 | } |
159 | |
160 | if (!m_firstPacketFound) { |
161 | m_firstPacketFound = true; |
162 | emit firstPacketFound(id: id(), absSeekPos: m_posInLoopUs + m_loopOffset.loopStartTimeUs.asDuration()); |
163 | } |
164 | |
165 | auto signal = signalByTrackType(trackType: it->second.trackType); |
166 | emit (this->*signal)(packet); |
167 | } |
168 | |
169 | scheduleNextStep(allowDoImmediatelly: false); |
170 | } |
171 | |
172 | void Demuxer::onPacketProcessed(Packet packet) |
173 | { |
174 | Q_ASSERT(packet.isValid()); |
175 | |
176 | if (packet.sourceId() != id()) |
177 | return; |
178 | |
179 | auto &avPacket = *packet.avPacket(); |
180 | |
181 | const auto streamIndex = avPacket.stream_index; |
182 | const auto stream = m_context->streams[streamIndex]; |
183 | auto it = m_streams.find(x: streamIndex); |
184 | |
185 | if (it != m_streams.end()) { |
186 | auto &streamData = it->second; |
187 | |
188 | // Decrease buffered metrics as new data (the packet) has been received (buffered) |
189 | |
190 | streamData.bufferedDuration -= toTrackDuration(streamDuration: AVStreamDuration(avPacket.duration), avStream: stream); |
191 | streamData.bufferedSize -= avPacket.size; |
192 | streamData.maxProcessedPacketPos = |
193 | qMax(a: streamData.maxProcessedPacketPos, b: packetEndPos(packet, stream, context: m_context)); |
194 | |
195 | Q_ASSERT(it->second.bufferedDuration >= TrackDuration(0)); |
196 | Q_ASSERT(it->second.bufferedSize >= 0); |
197 | |
198 | updateStreamDataLimitFlag(streamData); |
199 | } |
200 | |
201 | scheduleNextStep(); |
202 | } |
203 | |
204 | std::chrono::milliseconds Demuxer::timerInterval() const |
205 | { |
206 | using namespace std::chrono_literals; |
207 | return m_demuxerRetryCount != 0 ? s_demuxerRetryInterval : PlaybackEngineObject::timerInterval(); |
208 | } |
209 | |
210 | bool Demuxer::canDoNextStep() const |
211 | { |
212 | auto isDataLimitReached = [](const auto &streamIndexToData) { |
213 | return streamIndexToData.second.isDataLimitReached; |
214 | }; |
215 | |
216 | // Demuxer waits: |
217 | // - if it's paused |
218 | // - if the end has been reached |
219 | // - if streams are empty (probably, should be handled on the initialization) |
220 | // - if at least one of the streams has reached the data limit (duration or size) |
221 | |
222 | return PlaybackEngineObject::canDoNextStep() && !isAtEnd() && !m_streams.empty() |
223 | && std::none_of(first: m_streams.begin(), last: m_streams.end(), pred: isDataLimitReached); |
224 | } |
225 | |
226 | void Demuxer::ensureSeeked() |
227 | { |
228 | if (std::exchange(obj&: m_seeked, new_val: true)) |
229 | return; |
230 | |
231 | if ((m_context->ctx_flags & AVFMTCTX_UNSEEKABLE) == 0) { |
232 | |
233 | // m_posInLoopUs is intended to be the number of microseconds since playback start, and is |
234 | // in the range [0, duration()]. av_seek_frame seeks to a position relative to the start of |
235 | // the media timeline, which may be non-zero. We adjust for this by adding the |
236 | // AVFormatContext's start_time. |
237 | // |
238 | // NOTE: m_posInLoop is not calculated correctly if the start_time is non-zero, but |
239 | // this must be fixed separately. |
240 | const AVContextPosition seekPos = toContextPosition(trackPosition: m_posInLoopUs, formatContext: m_context); |
241 | |
242 | qCDebug(qLcDemuxer).nospace() |
243 | << "Seeking to offset " << m_posInLoopUs.get() << "us from media start." ; |
244 | |
245 | auto err = av_seek_frame(s: m_context, stream_index: -1, timestamp: seekPos.get(), AVSEEK_FLAG_BACKWARD); |
246 | |
247 | if (err < 0) { |
248 | qCWarning(qLcDemuxer) << "Failed to seek, pos" << seekPos.get(); |
249 | |
250 | // Drop an error of seeking to initial position of streams with undefined duration. |
251 | // This needs improvements. |
252 | if (m_posInLoopUs != TrackPosition{ 0 } || m_context->duration > 0) |
253 | emit error(code: QMediaPlayer::ResourceError, |
254 | errorString: QLatin1StringView("Failed to seek: " ) + err2str(errnum: err)); |
255 | } |
256 | } |
257 | |
258 | setAtEnd(false); |
259 | } |
260 | |
261 | Demuxer::RequestingSignal Demuxer::signalByTrackType(QPlatformMediaPlayer::TrackType trackType) |
262 | { |
263 | switch (trackType) { |
264 | case QPlatformMediaPlayer::TrackType::VideoStream: |
265 | return &Demuxer::requestProcessVideoPacket; |
266 | case QPlatformMediaPlayer::TrackType::AudioStream: |
267 | return &Demuxer::requestProcessAudioPacket; |
268 | case QPlatformMediaPlayer::TrackType::SubtitleStream: |
269 | return &Demuxer::requestProcessSubtitlePacket; |
270 | default: |
271 | Q_ASSERT(!"Unknown track type" ); |
272 | } |
273 | |
274 | return nullptr; |
275 | } |
276 | |
277 | void Demuxer::setLoops(int loopsCount) |
278 | { |
279 | qCDebug(qLcDemuxer) << "setLoops to demuxer" << loopsCount; |
280 | m_loops.storeRelease(newValue: loopsCount); |
281 | } |
282 | |
283 | void Demuxer::updateStreamDataLimitFlag(StreamData &streamData) |
284 | { |
285 | const TrackDuration packetsPosDiff = |
286 | streamData.maxSentPacketsPos - streamData.maxProcessedPacketPos; |
287 | streamData.isDataLimitReached = streamData.bufferedDuration >= MaxBufferedDurationUs |
288 | || (streamData.bufferedDuration == TrackDuration(0) |
289 | && packetsPosDiff >= MaxBufferedDurationUs) |
290 | || streamData.bufferedSize >= MaxBufferedSize; |
291 | } |
292 | |
293 | } // namespace QFFmpeg |
294 | |
295 | QT_END_NAMESPACE |
296 | |
297 | #include "moc_qffmpegdemuxer_p.cpp" |
298 | |