1 | /**************************************************************************** |
2 | ** |
3 | ** Copyright (C) 2017 The Qt Company Ltd. |
4 | ** Contact: https://www.qt.io/licensing/ |
5 | ** |
6 | ** This file is part of the examples of the Qt Toolkit. |
7 | ** |
8 | ** $QT_BEGIN_LICENSE:BSD$ |
9 | ** Commercial License Usage |
10 | ** Licensees holding valid commercial Qt licenses may use this file in |
11 | ** accordance with the commercial license agreement provided with the |
12 | ** Software or, alternatively, in accordance with the terms contained in |
13 | ** a written agreement between you and The Qt Company. For licensing terms |
14 | ** and conditions see https://www.qt.io/terms-conditions. For further |
15 | ** information use the contact form at https://www.qt.io/contact-us. |
16 | ** |
17 | ** BSD License Usage |
18 | ** Alternatively, you may use this file under the terms of the BSD license |
19 | ** as follows: |
20 | ** |
21 | ** "Redistribution and use in source and binary forms, with or without |
22 | ** modification, are permitted provided that the following conditions are |
23 | ** met: |
24 | ** * Redistributions of source code must retain the above copyright |
25 | ** notice, this list of conditions and the following disclaimer. |
26 | ** * Redistributions in binary form must reproduce the above copyright |
27 | ** notice, this list of conditions and the following disclaimer in |
28 | ** the documentation and/or other materials provided with the |
29 | ** distribution. |
30 | ** * Neither the name of The Qt Company Ltd nor the names of its |
31 | ** contributors may be used to endorse or promote products derived |
32 | ** from this software without specific prior written permission. |
33 | ** |
34 | ** |
35 | ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
36 | ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
37 | ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
38 | ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
39 | ** OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
40 | ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
41 | ** LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
42 | ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
43 | ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
44 | ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
45 | ** OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." |
46 | ** |
47 | ** $QT_END_LICENSE$ |
48 | ** |
49 | ****************************************************************************/ |
50 | |
51 | #include "engine.h" |
52 | #include "tonegenerator.h" |
53 | #include "utils.h" |
54 | |
55 | #include <math.h> |
56 | |
57 | #include <QAudioInput> |
58 | #include <QAudioOutput> |
59 | #include <QCoreApplication> |
60 | #include <QDebug> |
61 | #include <QFile> |
62 | #include <QMetaObject> |
63 | #include <QSet> |
64 | #include <QThread> |
65 | |
66 | //----------------------------------------------------------------------------- |
67 | // Constants |
68 | //----------------------------------------------------------------------------- |
69 | |
70 | const qint64 BufferDurationUs = 10 * 1000000; |
71 | const int NotifyIntervalMs = 100; |
72 | |
73 | // Size of the level calculation window in microseconds |
74 | const int LevelWindowUs = 0.1 * 1000000; |
75 | |
76 | //----------------------------------------------------------------------------- |
77 | // Constructor and destructor |
78 | //----------------------------------------------------------------------------- |
79 | |
80 | Engine::Engine(QObject *parent) |
81 | : QObject(parent) |
82 | , m_mode(QAudio::AudioInput) |
83 | , m_state(QAudio::StoppedState) |
84 | , m_generateTone(false) |
85 | , m_file(0) |
86 | , m_analysisFile(0) |
87 | , m_availableAudioInputDevices |
88 | (QAudioDeviceInfo::availableDevices(mode: QAudio::AudioInput)) |
89 | , m_audioInputDevice(QAudioDeviceInfo::defaultInputDevice()) |
90 | , m_audioInput(0) |
91 | , m_audioInputIODevice(0) |
92 | , m_recordPosition(0) |
93 | , m_availableAudioOutputDevices |
94 | (QAudioDeviceInfo::availableDevices(mode: QAudio::AudioOutput)) |
95 | , m_audioOutputDevice(QAudioDeviceInfo::defaultOutputDevice()) |
96 | , m_audioOutput(0) |
97 | , m_playPosition(0) |
98 | , m_bufferPosition(0) |
99 | , m_bufferLength(0) |
100 | , m_dataLength(0) |
101 | , m_levelBufferLength(0) |
102 | , m_rmsLevel(0.0) |
103 | , m_peakLevel(0.0) |
104 | , m_spectrumBufferLength(0) |
105 | , m_spectrumAnalyser() |
106 | , m_spectrumPosition(0) |
107 | , m_count(0) |
108 | { |
109 | qRegisterMetaType<FrequencySpectrum>(typeName: "FrequencySpectrum" ); |
110 | qRegisterMetaType<WindowFunction>(typeName: "WindowFunction" ); |
111 | connect(sender: &m_spectrumAnalyser, signal: QOverload<const FrequencySpectrum&>::of(ptr: &SpectrumAnalyser::spectrumChanged), |
112 | receiver: this, slot: QOverload<const FrequencySpectrum&>::of(ptr: &Engine::spectrumChanged)); |
113 | |
114 | // This code might misinterpret things like "-something -category". But |
115 | // it's unlikely that that needs to be supported so we'll let it go. |
116 | QStringList arguments = QCoreApplication::instance()->arguments(); |
117 | for (int i = 0; i < arguments.count(); ++i) { |
118 | if (arguments.at(i) == QStringLiteral("--" )) |
119 | break; |
120 | |
121 | if (arguments.at(i) == QStringLiteral("-category" ) |
122 | || arguments.at(i) == QStringLiteral("--category" )) { |
123 | ++i; |
124 | if (i < arguments.count()) |
125 | m_audioOutputCategory = arguments.at(i); |
126 | else |
127 | --i; |
128 | } |
129 | } |
130 | |
131 | initialize(); |
132 | |
133 | #ifdef DUMP_DATA |
134 | createOutputDir(); |
135 | #endif |
136 | |
137 | #ifdef DUMP_SPECTRUM |
138 | m_spectrumAnalyser.setOutputPath(outputPath()); |
139 | #endif |
140 | } |
141 | |
142 | Engine::~Engine() |
143 | { |
144 | |
145 | } |
146 | |
147 | //----------------------------------------------------------------------------- |
148 | // Public functions |
149 | //----------------------------------------------------------------------------- |
150 | |
151 | bool Engine::loadFile(const QString &fileName) |
152 | { |
153 | reset(); |
154 | bool result = false; |
155 | Q_ASSERT(!m_generateTone); |
156 | Q_ASSERT(!m_file); |
157 | Q_ASSERT(!fileName.isEmpty()); |
158 | m_file = new WavFile(this); |
159 | if (m_file->open(fileName)) { |
160 | if (isPCMS16LE(format: m_file->fileFormat())) { |
161 | result = initialize(); |
162 | } else { |
163 | emit errorMessage(heading: tr(s: "Audio format not supported" ), |
164 | detail: formatToString(format: m_file->fileFormat())); |
165 | } |
166 | } else { |
167 | emit errorMessage(heading: tr(s: "Could not open file" ), detail: fileName); |
168 | } |
169 | if (result) { |
170 | m_analysisFile = new WavFile(this); |
171 | m_analysisFile->open(fileName); |
172 | } |
173 | return result; |
174 | } |
175 | |
176 | bool Engine::generateTone(const Tone &tone) |
177 | { |
178 | reset(); |
179 | Q_ASSERT(!m_generateTone); |
180 | Q_ASSERT(!m_file); |
181 | m_generateTone = true; |
182 | m_tone = tone; |
183 | ENGINE_DEBUG << "Engine::generateTone" |
184 | << "startFreq" << m_tone.startFreq |
185 | << "endFreq" << m_tone.endFreq |
186 | << "amp" << m_tone.amplitude; |
187 | return initialize(); |
188 | } |
189 | |
190 | bool Engine::generateSweptTone(qreal amplitude) |
191 | { |
192 | Q_ASSERT(!m_generateTone); |
193 | Q_ASSERT(!m_file); |
194 | m_generateTone = true; |
195 | m_tone.startFreq = 1; |
196 | m_tone.endFreq = 0; |
197 | m_tone.amplitude = amplitude; |
198 | ENGINE_DEBUG << "Engine::generateSweptTone" |
199 | << "startFreq" << m_tone.startFreq |
200 | << "amp" << m_tone.amplitude; |
201 | return initialize(); |
202 | } |
203 | |
204 | bool Engine::initializeRecord() |
205 | { |
206 | reset(); |
207 | ENGINE_DEBUG << "Engine::initializeRecord" ; |
208 | Q_ASSERT(!m_generateTone); |
209 | Q_ASSERT(!m_file); |
210 | m_generateTone = false; |
211 | m_tone = SweptTone(); |
212 | return initialize(); |
213 | } |
214 | |
215 | qint64 Engine::bufferLength() const |
216 | { |
217 | return m_file ? m_file->size() : m_bufferLength; |
218 | } |
219 | |
220 | void Engine::setWindowFunction(WindowFunction type) |
221 | { |
222 | m_spectrumAnalyser.setWindowFunction(type); |
223 | } |
224 | |
225 | |
226 | //----------------------------------------------------------------------------- |
227 | // Public slots |
228 | //----------------------------------------------------------------------------- |
229 | |
230 | void Engine::startRecording() |
231 | { |
232 | if (m_audioInput) { |
233 | if (QAudio::AudioInput == m_mode && |
234 | QAudio::SuspendedState == m_state) { |
235 | m_audioInput->resume(); |
236 | } else { |
237 | m_spectrumAnalyser.cancelCalculation(); |
238 | spectrumChanged(position: 0, length: 0, spectrum: FrequencySpectrum()); |
239 | |
240 | m_buffer.fill(c: 0); |
241 | setRecordPosition(position: 0, forceEmit: true); |
242 | stopPlayback(); |
243 | m_mode = QAudio::AudioInput; |
244 | connect(sender: m_audioInput, signal: &QAudioInput::stateChanged, |
245 | receiver: this, slot: &Engine::audioStateChanged); |
246 | connect(sender: m_audioInput, signal: &QAudioInput::notify, |
247 | receiver: this, slot: &Engine::audioNotify); |
248 | |
249 | m_count = 0; |
250 | m_dataLength = 0; |
251 | emit dataLengthChanged(duration: 0); |
252 | m_audioInputIODevice = m_audioInput->start(); |
253 | connect(sender: m_audioInputIODevice, signal: &QIODevice::readyRead, |
254 | receiver: this, slot: &Engine::audioDataReady); |
255 | } |
256 | } |
257 | } |
258 | |
259 | void Engine::startPlayback() |
260 | { |
261 | if (m_audioOutput) { |
262 | if (QAudio::AudioOutput == m_mode && |
263 | QAudio::SuspendedState == m_state) { |
264 | #ifdef Q_OS_WIN |
265 | // The Windows backend seems to internally go back into ActiveState |
266 | // while still returning SuspendedState, so to ensure that it doesn't |
267 | // ignore the resume() call, we first re-suspend |
268 | m_audioOutput->suspend(); |
269 | #endif |
270 | m_audioOutput->resume(); |
271 | } else { |
272 | m_spectrumAnalyser.cancelCalculation(); |
273 | spectrumChanged(position: 0, length: 0, spectrum: FrequencySpectrum()); |
274 | setPlayPosition(position: 0, forceEmit: true); |
275 | stopRecording(); |
276 | m_mode = QAudio::AudioOutput; |
277 | connect(sender: m_audioOutput, signal: &QAudioOutput::stateChanged, |
278 | receiver: this, slot: &Engine::audioStateChanged); |
279 | connect(sender: m_audioOutput, signal: &QAudioOutput::notify, |
280 | receiver: this, slot: &Engine::audioNotify); |
281 | |
282 | m_count = 0; |
283 | if (m_file) { |
284 | m_file->seek(offset: 0); |
285 | m_bufferPosition = 0; |
286 | m_dataLength = 0; |
287 | m_audioOutput->start(device: m_file); |
288 | } else { |
289 | m_audioOutputIODevice.close(); |
290 | m_audioOutputIODevice.setBuffer(&m_buffer); |
291 | m_audioOutputIODevice.open(openMode: QIODevice::ReadOnly); |
292 | m_audioOutput->start(device: &m_audioOutputIODevice); |
293 | } |
294 | } |
295 | } |
296 | } |
297 | |
298 | void Engine::suspend() |
299 | { |
300 | if (QAudio::ActiveState == m_state || |
301 | QAudio::IdleState == m_state) { |
302 | switch (m_mode) { |
303 | case QAudio::AudioInput: |
304 | m_audioInput->suspend(); |
305 | break; |
306 | case QAudio::AudioOutput: |
307 | m_audioOutput->suspend(); |
308 | break; |
309 | } |
310 | } |
311 | } |
312 | |
313 | void Engine::setAudioInputDevice(const QAudioDeviceInfo &device) |
314 | { |
315 | if (device.deviceName() != m_audioInputDevice.deviceName()) { |
316 | m_audioInputDevice = device; |
317 | initialize(); |
318 | } |
319 | } |
320 | |
321 | void Engine::setAudioOutputDevice(const QAudioDeviceInfo &device) |
322 | { |
323 | if (device.deviceName() != m_audioOutputDevice.deviceName()) { |
324 | m_audioOutputDevice = device; |
325 | initialize(); |
326 | } |
327 | } |
328 | |
329 | |
330 | //----------------------------------------------------------------------------- |
331 | // Private slots |
332 | //----------------------------------------------------------------------------- |
333 | |
334 | void Engine::audioNotify() |
335 | { |
336 | switch (m_mode) { |
337 | case QAudio::AudioInput: { |
338 | const qint64 recordPosition = qMin(a: m_bufferLength, b: audioLength(format: m_format, microSeconds: m_audioInput->processedUSecs())); |
339 | setRecordPosition(position: recordPosition); |
340 | const qint64 levelPosition = m_dataLength - m_levelBufferLength; |
341 | if (levelPosition >= 0) |
342 | calculateLevel(position: levelPosition, length: m_levelBufferLength); |
343 | if (m_dataLength >= m_spectrumBufferLength) { |
344 | const qint64 spectrumPosition = m_dataLength - m_spectrumBufferLength; |
345 | calculateSpectrum(position: spectrumPosition); |
346 | } |
347 | emit bufferChanged(position: 0, length: m_dataLength, buffer: m_buffer); |
348 | } |
349 | break; |
350 | case QAudio::AudioOutput: { |
351 | const qint64 playPosition = audioLength(format: m_format, microSeconds: m_audioOutput->processedUSecs()); |
352 | setPlayPosition(position: qMin(a: bufferLength(), b: playPosition)); |
353 | const qint64 levelPosition = playPosition - m_levelBufferLength; |
354 | const qint64 spectrumPosition = playPosition - m_spectrumBufferLength; |
355 | if (m_file) { |
356 | if (levelPosition > m_bufferPosition || |
357 | spectrumPosition > m_bufferPosition || |
358 | qMax(a: m_levelBufferLength, b: m_spectrumBufferLength) > m_dataLength) { |
359 | m_bufferPosition = 0; |
360 | m_dataLength = 0; |
361 | // Data needs to be read into m_buffer in order to be analysed |
362 | const qint64 readPos = qMax(a: qint64(0), b: qMin(a: levelPosition, b: spectrumPosition)); |
363 | const qint64 readEnd = qMin(a: m_analysisFile->size(), b: qMax(a: levelPosition + m_levelBufferLength, b: spectrumPosition + m_spectrumBufferLength)); |
364 | const qint64 readLen = readEnd - readPos + audioLength(format: m_format, microSeconds: WaveformWindowDuration); |
365 | qDebug() << "Engine::audioNotify [1]" |
366 | << "analysisFileSize" << m_analysisFile->size() |
367 | << "readPos" << readPos |
368 | << "readLen" << readLen; |
369 | if (m_analysisFile->seek(offset: readPos + m_analysisFile->headerLength())) { |
370 | m_buffer.resize(size: readLen); |
371 | m_bufferPosition = readPos; |
372 | m_dataLength = m_analysisFile->read(data: m_buffer.data(), maxlen: readLen); |
373 | qDebug() << "Engine::audioNotify [2]" << "bufferPosition" << m_bufferPosition << "dataLength" << m_dataLength; |
374 | } else { |
375 | qDebug() << "Engine::audioNotify [2]" << "file seek error" ; |
376 | } |
377 | emit bufferChanged(position: m_bufferPosition, length: m_dataLength, buffer: m_buffer); |
378 | } |
379 | } else { |
380 | if (playPosition >= m_dataLength) |
381 | stopPlayback(); |
382 | } |
383 | if (levelPosition >= 0 && levelPosition + m_levelBufferLength < m_bufferPosition + m_dataLength) |
384 | calculateLevel(position: levelPosition, length: m_levelBufferLength); |
385 | if (spectrumPosition >= 0 && spectrumPosition + m_spectrumBufferLength < m_bufferPosition + m_dataLength) |
386 | calculateSpectrum(position: spectrumPosition); |
387 | } |
388 | break; |
389 | } |
390 | } |
391 | |
392 | void Engine::audioStateChanged(QAudio::State state) |
393 | { |
394 | ENGINE_DEBUG << "Engine::audioStateChanged from" << m_state |
395 | << "to" << state; |
396 | |
397 | if (QAudio::IdleState == state && m_file && m_file->pos() == m_file->size()) { |
398 | stopPlayback(); |
399 | } else { |
400 | if (QAudio::StoppedState == state) { |
401 | // Check error |
402 | QAudio::Error error = QAudio::NoError; |
403 | switch (m_mode) { |
404 | case QAudio::AudioInput: |
405 | error = m_audioInput->error(); |
406 | break; |
407 | case QAudio::AudioOutput: |
408 | error = m_audioOutput->error(); |
409 | break; |
410 | } |
411 | if (QAudio::NoError != error) { |
412 | reset(); |
413 | return; |
414 | } |
415 | } |
416 | setState(state); |
417 | } |
418 | } |
419 | |
420 | void Engine::audioDataReady() |
421 | { |
422 | Q_ASSERT(0 == m_bufferPosition); |
423 | const qint64 bytesReady = m_audioInput->bytesReady(); |
424 | const qint64 bytesSpace = m_buffer.size() - m_dataLength; |
425 | const qint64 bytesToRead = qMin(a: bytesReady, b: bytesSpace); |
426 | |
427 | const qint64 bytesRead = m_audioInputIODevice->read( |
428 | data: m_buffer.data() + m_dataLength, |
429 | maxlen: bytesToRead); |
430 | |
431 | if (bytesRead) { |
432 | m_dataLength += bytesRead; |
433 | emit dataLengthChanged(duration: dataLength()); |
434 | } |
435 | |
436 | if (m_buffer.size() == m_dataLength) |
437 | stopRecording(); |
438 | } |
439 | |
440 | void Engine::spectrumChanged(const FrequencySpectrum &spectrum) |
441 | { |
442 | ENGINE_DEBUG << "Engine::spectrumChanged" << "pos" << m_spectrumPosition; |
443 | emit spectrumChanged(position: m_spectrumPosition, length: m_spectrumBufferLength, spectrum); |
444 | } |
445 | |
446 | |
447 | //----------------------------------------------------------------------------- |
448 | // Private functions |
449 | //----------------------------------------------------------------------------- |
450 | |
451 | void Engine::resetAudioDevices() |
452 | { |
453 | delete m_audioInput; |
454 | m_audioInput = 0; |
455 | m_audioInputIODevice = 0; |
456 | setRecordPosition(position: 0); |
457 | delete m_audioOutput; |
458 | m_audioOutput = 0; |
459 | setPlayPosition(position: 0); |
460 | m_spectrumPosition = 0; |
461 | setLevel(rmsLevel: 0.0, peakLevel: 0.0, numSamples: 0); |
462 | } |
463 | |
464 | void Engine::reset() |
465 | { |
466 | stopRecording(); |
467 | stopPlayback(); |
468 | setState(mode: QAudio::AudioInput, state: QAudio::StoppedState); |
469 | setFormat(QAudioFormat()); |
470 | m_generateTone = false; |
471 | delete m_file; |
472 | m_file = 0; |
473 | delete m_analysisFile; |
474 | m_analysisFile = 0; |
475 | m_buffer.clear(); |
476 | m_bufferPosition = 0; |
477 | m_bufferLength = 0; |
478 | m_dataLength = 0; |
479 | emit dataLengthChanged(duration: 0); |
480 | resetAudioDevices(); |
481 | } |
482 | |
483 | bool Engine::initialize() |
484 | { |
485 | bool result = false; |
486 | |
487 | QAudioFormat format = m_format; |
488 | |
489 | if (selectFormat()) { |
490 | if (m_format != format) { |
491 | resetAudioDevices(); |
492 | if (m_file) { |
493 | emit bufferLengthChanged(duration: bufferLength()); |
494 | emit dataLengthChanged(duration: dataLength()); |
495 | emit bufferChanged(position: 0, length: 0, buffer: m_buffer); |
496 | setRecordPosition(position: bufferLength()); |
497 | result = true; |
498 | } else { |
499 | m_bufferLength = audioLength(format: m_format, microSeconds: BufferDurationUs); |
500 | m_buffer.resize(size: m_bufferLength); |
501 | m_buffer.fill(c: 0); |
502 | emit bufferLengthChanged(duration: bufferLength()); |
503 | if (m_generateTone) { |
504 | if (0 == m_tone.endFreq) { |
505 | const qreal nyquist = nyquistFrequency(format: m_format); |
506 | m_tone.endFreq = qMin(a: qreal(SpectrumHighFreq), b: nyquist); |
507 | } |
508 | // Call function defined in utils.h, at global scope |
509 | ::generateTone(tone: m_tone, format: m_format, buffer&: m_buffer); |
510 | m_dataLength = m_bufferLength; |
511 | emit dataLengthChanged(duration: dataLength()); |
512 | emit bufferChanged(position: 0, length: m_dataLength, buffer: m_buffer); |
513 | setRecordPosition(position: m_bufferLength); |
514 | result = true; |
515 | } else { |
516 | emit bufferChanged(position: 0, length: 0, buffer: m_buffer); |
517 | m_audioInput = new QAudioInput(m_audioInputDevice, m_format, this); |
518 | m_audioInput->setNotifyInterval(NotifyIntervalMs); |
519 | result = true; |
520 | } |
521 | } |
522 | m_audioOutput = new QAudioOutput(m_audioOutputDevice, m_format, this); |
523 | m_audioOutput->setNotifyInterval(NotifyIntervalMs); |
524 | m_audioOutput->setCategory(m_audioOutputCategory); |
525 | } |
526 | } else { |
527 | if (m_file) |
528 | emit errorMessage(heading: tr(s: "Audio format not supported" ), |
529 | detail: formatToString(format: m_format)); |
530 | else if (m_generateTone) |
531 | emit errorMessage(heading: tr(s: "No suitable format found" ), detail: "" ); |
532 | else |
533 | emit errorMessage(heading: tr(s: "No common input / output format found" ), detail: "" ); |
534 | } |
535 | |
536 | ENGINE_DEBUG << "Engine::initialize" << "m_bufferLength" << m_bufferLength; |
537 | ENGINE_DEBUG << "Engine::initialize" << "m_dataLength" << m_dataLength; |
538 | ENGINE_DEBUG << "Engine::initialize" << "format" << m_format; |
539 | ENGINE_DEBUG << "Engine::initialize" << "m_audioOutputCategory" << m_audioOutputCategory; |
540 | |
541 | return result; |
542 | } |
543 | |
544 | bool Engine::selectFormat() |
545 | { |
546 | bool foundSupportedFormat = false; |
547 | |
548 | if (m_file || QAudioFormat() != m_format) { |
549 | QAudioFormat format = m_format; |
550 | if (m_file) |
551 | // Header is read from the WAV file; just need to check whether |
552 | // it is supported by the audio output device |
553 | format = m_file->fileFormat(); |
554 | if (m_audioOutputDevice.isFormatSupported(format)) { |
555 | setFormat(format); |
556 | foundSupportedFormat = true; |
557 | } |
558 | } else { |
559 | |
560 | QList<int> sampleRatesList; |
561 | #ifdef Q_OS_WIN |
562 | // The Windows audio backend does not correctly report format support |
563 | // (see QTBUG-9100). Furthermore, although the audio subsystem captures |
564 | // at 11025Hz, the resulting audio is corrupted. |
565 | sampleRatesList += 8000; |
566 | #endif |
567 | |
568 | if (!m_generateTone) |
569 | sampleRatesList += m_audioInputDevice.supportedSampleRates(); |
570 | |
571 | sampleRatesList += m_audioOutputDevice.supportedSampleRates(); |
572 | std::sort(first: sampleRatesList.begin(), last: sampleRatesList.end()); |
573 | const auto uniqueRatesEnd = std::unique(first: sampleRatesList.begin(), last: sampleRatesList.end()); |
574 | sampleRatesList.erase(afirst: uniqueRatesEnd, alast: sampleRatesList.end()); |
575 | ENGINE_DEBUG << "Engine::initialize frequenciesList" << sampleRatesList; |
576 | |
577 | QList<int> channelsList; |
578 | channelsList += m_audioInputDevice.supportedChannelCounts(); |
579 | channelsList += m_audioOutputDevice.supportedChannelCounts(); |
580 | std::sort(first: channelsList.begin(), last: channelsList.end()); |
581 | const auto uniqueChannelsEnd = std::unique(first: channelsList.begin(), last: channelsList.end()); |
582 | channelsList.erase(afirst: uniqueChannelsEnd, alast: channelsList.end()); |
583 | ENGINE_DEBUG << "Engine::initialize channelsList" << channelsList; |
584 | |
585 | QAudioFormat format; |
586 | format.setByteOrder(QAudioFormat::LittleEndian); |
587 | format.setCodec("audio/pcm" ); |
588 | format.setSampleSize(16); |
589 | format.setSampleType(QAudioFormat::SignedInt); |
590 | for (int sampleRate : qAsConst(t&: sampleRatesList)) { |
591 | if (foundSupportedFormat) |
592 | break; |
593 | format.setSampleRate(sampleRate); |
594 | for (int channels : qAsConst(t&: channelsList)) { |
595 | format.setChannelCount(channels); |
596 | const bool inputSupport = m_generateTone || |
597 | m_audioInputDevice.isFormatSupported(format); |
598 | const bool outputSupport = m_audioOutputDevice.isFormatSupported(format); |
599 | ENGINE_DEBUG << "Engine::initialize checking " << format |
600 | << "input" << inputSupport |
601 | << "output" << outputSupport; |
602 | if (inputSupport && outputSupport) { |
603 | foundSupportedFormat = true; |
604 | break; |
605 | } |
606 | } |
607 | } |
608 | |
609 | if (!foundSupportedFormat) |
610 | format = QAudioFormat(); |
611 | |
612 | setFormat(format); |
613 | } |
614 | |
615 | return foundSupportedFormat; |
616 | } |
617 | |
618 | void Engine::stopRecording() |
619 | { |
620 | if (m_audioInput) { |
621 | m_audioInput->stop(); |
622 | QCoreApplication::instance()->processEvents(); |
623 | m_audioInput->disconnect(); |
624 | } |
625 | m_audioInputIODevice = 0; |
626 | |
627 | #ifdef DUMP_AUDIO |
628 | dumpData(); |
629 | #endif |
630 | } |
631 | |
632 | void Engine::stopPlayback() |
633 | { |
634 | if (m_audioOutput) { |
635 | m_audioOutput->stop(); |
636 | QCoreApplication::instance()->processEvents(); |
637 | m_audioOutput->disconnect(); |
638 | setPlayPosition(position: 0); |
639 | } |
640 | } |
641 | |
642 | void Engine::setState(QAudio::State state) |
643 | { |
644 | const bool changed = (m_state != state); |
645 | m_state = state; |
646 | if (changed) |
647 | emit stateChanged(mode: m_mode, state: m_state); |
648 | } |
649 | |
650 | void Engine::setState(QAudio::Mode mode, QAudio::State state) |
651 | { |
652 | const bool changed = (m_mode != mode || m_state != state); |
653 | m_mode = mode; |
654 | m_state = state; |
655 | if (changed) |
656 | emit stateChanged(mode: m_mode, state: m_state); |
657 | } |
658 | |
659 | void Engine::setRecordPosition(qint64 position, bool forceEmit) |
660 | { |
661 | const bool changed = (m_recordPosition != position); |
662 | m_recordPosition = position; |
663 | if (changed || forceEmit) |
664 | emit recordPositionChanged(position: m_recordPosition); |
665 | } |
666 | |
667 | void Engine::setPlayPosition(qint64 position, bool forceEmit) |
668 | { |
669 | const bool changed = (m_playPosition != position); |
670 | m_playPosition = position; |
671 | if (changed || forceEmit) |
672 | emit playPositionChanged(position: m_playPosition); |
673 | } |
674 | |
675 | void Engine::calculateLevel(qint64 position, qint64 length) |
676 | { |
677 | #ifdef DISABLE_LEVEL |
678 | Q_UNUSED(position) |
679 | Q_UNUSED(length) |
680 | #else |
681 | Q_ASSERT(position + length <= m_bufferPosition + m_dataLength); |
682 | |
683 | qreal peakLevel = 0.0; |
684 | |
685 | qreal sum = 0.0; |
686 | const char *ptr = m_buffer.constData() + position - m_bufferPosition; |
687 | const char *const end = ptr + length; |
688 | while (ptr < end) { |
689 | const qint16 value = *reinterpret_cast<const qint16*>(ptr); |
690 | const qreal fracValue = pcmToReal(pcm: value); |
691 | peakLevel = qMax(a: peakLevel, b: fracValue); |
692 | sum += fracValue * fracValue; |
693 | ptr += 2; |
694 | } |
695 | const int numSamples = length / 2; |
696 | qreal rmsLevel = sqrt(x: sum / numSamples); |
697 | |
698 | rmsLevel = qMax(a: qreal(0.0), b: rmsLevel); |
699 | rmsLevel = qMin(a: qreal(1.0), b: rmsLevel); |
700 | setLevel(rmsLevel, peakLevel, numSamples); |
701 | |
702 | ENGINE_DEBUG << "Engine::calculateLevel" << "pos" << position << "len" << length |
703 | << "rms" << rmsLevel << "peak" << peakLevel; |
704 | #endif |
705 | } |
706 | |
707 | void Engine::calculateSpectrum(qint64 position) |
708 | { |
709 | #ifdef DISABLE_SPECTRUM |
710 | Q_UNUSED(position) |
711 | #else |
712 | Q_ASSERT(position + m_spectrumBufferLength <= m_bufferPosition + m_dataLength); |
713 | Q_ASSERT(0 == m_spectrumBufferLength % 2); // constraint of FFT algorithm |
714 | |
715 | // QThread::currentThread is marked 'for internal use only', but |
716 | // we're only using it for debug output here, so it's probably OK :) |
717 | ENGINE_DEBUG << "Engine::calculateSpectrum" << QThread::currentThread() |
718 | << "count" << m_count << "pos" << position << "len" << m_spectrumBufferLength |
719 | << "spectrumAnalyser.isReady" << m_spectrumAnalyser.isReady(); |
720 | |
721 | if (m_spectrumAnalyser.isReady()) { |
722 | m_spectrumBuffer = QByteArray::fromRawData(m_buffer.constData() + position - m_bufferPosition, |
723 | size: m_spectrumBufferLength); |
724 | m_spectrumPosition = position; |
725 | m_spectrumAnalyser.calculate(buffer: m_spectrumBuffer, format: m_format); |
726 | } |
727 | #endif |
728 | } |
729 | |
730 | void Engine::setFormat(const QAudioFormat &format) |
731 | { |
732 | const bool changed = (format != m_format); |
733 | m_format = format; |
734 | m_levelBufferLength = audioLength(format: m_format, microSeconds: LevelWindowUs); |
735 | m_spectrumBufferLength = SpectrumLengthSamples * |
736 | (m_format.sampleSize() / 8) * m_format.channelCount(); |
737 | if (changed) |
738 | emit formatChanged(format: m_format); |
739 | } |
740 | |
741 | void Engine::setLevel(qreal rmsLevel, qreal peakLevel, int numSamples) |
742 | { |
743 | m_rmsLevel = rmsLevel; |
744 | m_peakLevel = peakLevel; |
745 | emit levelChanged(rmsLevel: m_rmsLevel, peakLevel: m_peakLevel, numSamples); |
746 | } |
747 | |
#ifdef DUMP_DATA
// Creates (or empties) the "output" directory used for debug dumps.
void Engine::createOutputDir()
{
    m_outputDir.setPath("output");

    // Ensure output directory exists and is empty
    if (!m_outputDir.exists()) {
        QDir::current().mkdir("output");
    } else {
        const QStringList files = m_outputDir.entryList(QDir::Files);
        for (const QString &file : files)
            m_outputDir.remove(file);
    }
}
#endif // DUMP_DATA
763 | |
#ifdef DUMP_AUDIO
// Writes the captured buffer to the output directory, both as a
// tab-separated text file (first channel only) and as raw PCM.
void Engine::dumpData()
{
    const QString txtFileName = m_outputDir.filePath("data.txt");
    QFile txtFile(txtFileName);
    txtFile.open(QFile::WriteOnly | QFile::Text);
    QTextStream stream(&txtFile);
    const qint16 *ptr = reinterpret_cast<const qint16*>(m_buffer.constData());
    // channelCount() replaces the long-deprecated QAudioFormat::channels().
    const int numSamples = m_dataLength / (2 * m_format.channelCount());
    for (int i = 0; i < numSamples; ++i) {
        stream << i << "\t" << *ptr << "\n";
        ptr += m_format.channelCount();   // skip to next frame's first channel
    }

    const QString pcmFileName = m_outputDir.filePath("data.pcm");
    QFile pcmFile(pcmFileName);
    pcmFile.open(QFile::WriteOnly);
    pcmFile.write(m_buffer.constData(), m_dataLength);
}
#endif // DUMP_AUDIO
784 | |