Problem Description
I have a device with microphones that connects to my computer over Ethernet. Qt cannot see it as an audio device, so I receive packets from it and put them into a QByteArray. I need to play these packets as a stream. Somewhere on the Internet I found a solution to almost the same problem, but it used the built-in microphone.
#include <QApplication>
#include <iostream>
#include <cassert>
#include <QCoreApplication>
#include <QAudioInput>
#include <QAudioOutput>
#include <QBuffer>

int main(int argc, char *argv[]) {
    QCoreApplication app(argc, argv);

    QBuffer rdBuff;
    QBuffer wrBuff;
    wrBuff.open(QBuffer::WriteOnly);
    rdBuff.open(QBuffer::ReadOnly);

    QObject::connect(&wrBuff, &QIODevice::bytesWritten, [&wrBuff, &rdBuff](qint64)
    {
        rdBuff.buffer().remove(0, rdBuff.pos());
        // set pointer to the beginning of the unread data
        const auto res = rdBuff.seek(0);
        assert(res);
        // write new data
        rdBuff.buffer().append(wrBuff.buffer());
        // remove all data that was already written
        wrBuff.buffer().clear();
        wrBuff.seek(0);
    });

    const auto decideAudioFormat = [](const QAudioDeviceInfo& devInfo)
    {
        QAudioFormat format;
        format.setSampleRate(8000);
        format.setChannelCount(1);
        format.setSampleSize(16);
        format.setCodec("audio/pcm");
        format.setByteOrder(QAudioFormat::LittleEndian);
        format.setSampleType(QAudioFormat::SignedInt);

        if (devInfo.isFormatSupported(format))
        {
            return format;
        }
        else
        {
            std::cerr << "Raw audio format not supported by backend, cannot play audio.\n";
            throw 0;
        }
    };

    QAudioInput audioInput(decideAudioFormat(QAudioDeviceInfo::defaultInputDevice()));
    QAudioOutput audioOutput(decideAudioFormat(QAudioDeviceInfo::defaultOutputDevice()));

    audioInput.start(&wrBuff);
    audioOutput.start(&rdBuff);

    return app.exec();
}
It works quite well, but I need to set a QByteArray as the QAudioInput's source. Is there any possible solution?
Recommended Answer
Not sure if I'm directly answering your question, but a possible solution is to feed the output audio device manually (push mode) whenever new data arrives.
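At its simplest, push mode means calling QAudioOutput::start() with no QIODevice argument and writing your raw PCM bytes into the QIODevice it returns. The following minimal sketch (my illustration, not part of the original answer) assumes the default output device supports 8 kHz, mono, 16-bit signed little-endian PCM, and just plays one second of silence; in your case the written QByteArray would be a packet from the Ethernet device:

// Minimal push-mode sketch (Qt 5 Multimedia). Assumes the default output
// device supports 8 kHz, mono, 16-bit signed little-endian PCM.
#include <QtCore>
#include <QtMultimedia>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);

    QAudioFormat format;
    format.setSampleRate(8000);
    format.setChannelCount(1);
    format.setSampleSize(16);
    format.setCodec("audio/pcm");
    format.setByteOrder(QAudioFormat::LittleEndian);
    format.setSampleType(QAudioFormat::SignedInt);

    QAudioOutput audioOutput(QAudioDeviceInfo::defaultOutputDevice(), format);
    QIODevice *device = audioOutput.start(); // push mode: we write, Qt plays

    // In the real application this QByteArray would be a packet
    // received from the Ethernet microphone.
    QByteArray packet(8000 * 2, '\0'); // one second of 16-bit mono silence
    device->write(packet);

    return app.exec();
}

The full example below does the same thing but adds a buffering layer so that irregular packet arrival does not immediately cause underruns, and it records the data to a WAV file at the same time.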
You can also use a custom (QFile-derived) class to record the sound: when data arrives, feed both the file and the output audio device.
Here is an example:
AudioOutput.h:
#ifndef AUDIOOUTPUT_H
#define AUDIOOUTPUT_H

#include <QtCore>
#include <QtMultimedia>

#define MAX_BUFFERED_TIME 10*1000

static inline int timeToSize(int ms, const QAudioFormat &format)
{
    return ((format.channelCount() * (format.sampleSize() / 8) * format.sampleRate()) * ms / 1000);
}

class AudioOutput : public QObject
{
    Q_OBJECT
public:
    explicit AudioOutput(QObject *parent = nullptr);

public slots:
    bool start(const QAudioDeviceInfo &devinfo,
               const QAudioFormat &format,
               int time_to_buffer);

    void write(const QByteArray &data);

private slots:
    void verifyBuffer();
    void preplay();
    void play();

private:
    bool m_initialized;
    QAudioOutput *m_audio_output;
    QIODevice *m_device;
    QByteArray m_buffer;
    bool m_buffer_requested;
    bool m_play_called;
    int m_size_to_buffer;
    int m_time_to_buffer;
    int m_max_size_to_buffer;
    QAudioFormat m_format;
};

#endif // AUDIOOUTPUT_H
AudioRecorder.h:
#ifndef AUDIORECORDER_H
#define AUDIORECORDER_H

#include <QtCore>
#include <QtMultimedia>

class AudioRecorder : public QFile
{
    Q_OBJECT
public:
    explicit AudioRecorder(const QString &name, const QAudioFormat &format, QObject *parent = nullptr);
    ~AudioRecorder();

    using QFile::open;

public slots:
    bool open();
    qint64 write(const QByteArray &data);
    void close();

private:
    void writeHeader();
    bool hasSupportedFormat();

    QAudioFormat format;
};

#endif // AUDIORECORDER_H
AudioOutput.cpp:
#include "audiooutput.h"
AudioOutput::AudioOutput(QObject *parent) : QObject(parent)
{
m_initialized = false;
m_audio_output = nullptr;
m_device = nullptr;
m_buffer_requested = true;
m_play_called = false;
m_size_to_buffer = 0;
m_time_to_buffer = 0;
m_max_size_to_buffer = 0;
}
bool AudioOutput::start(const QAudioDeviceInfo &devinfo,
const QAudioFormat &format,
int time_to_buffer)
{
if (!devinfo.isFormatSupported(format))
{
qDebug() << "Format not supported by output device";
return m_initialized;
}
m_format = format;
int internal_buffer_size;
//Adjust internal buffer size
if (format.sampleRate() >= 44100)
internal_buffer_size = (1024 * 10) * format.channelCount();
else if (format.sampleRate() >= 24000)
internal_buffer_size = (1024 * 6) * format.channelCount();
else
internal_buffer_size = (1024 * 4) * format.channelCount();
//Initialize the audio output device
m_audio_output = new QAudioOutput(devinfo, format, this);
//Increase the buffer size to enable higher sample rates
m_audio_output->setBufferSize(internal_buffer_size);
m_time_to_buffer = time_to_buffer;
//Compute the size in bytes to be buffered based on the current format
m_size_to_buffer = timeToSize(m_time_to_buffer, m_format);
//Define a highest size that the buffer are allowed to have in the given time
//This value is used to discard too old buffered data
m_max_size_to_buffer = m_size_to_buffer + timeToSize(MAX_BUFFERED_TIME, m_format);
m_device = m_audio_output->start();
if (!m_device)
{
qDebug() << "Failed to open output audio device";
return m_initialized;
}
//Timer that helps to keep playing data while it's available on the internal buffer
QTimer *timer_play = new QTimer(this);
timer_play->setTimerType(Qt::PreciseTimer);
connect(timer_play, &QTimer::timeout, this, &AudioOutput::preplay);
timer_play->start(10);
//Timer that checks for too old data in the buffer
QTimer *timer_verifier = new QTimer(this);
connect(timer_verifier, &QTimer::timeout, this, &AudioOutput::verifyBuffer);
timer_verifier->start(qMax(m_time_to_buffer, 10));
m_initialized = true;
return m_initialized;
}
void AudioOutput::verifyBuffer()
{
if (m_buffer.size() >= m_max_size_to_buffer)
m_buffer.clear();
}
void AudioOutput::write(const QByteArray &data)
{
m_buffer.append(data);
preplay();
}
void AudioOutput::preplay()
{
if (!m_initialized)
return;
//Verify if exists a pending call to play function
//If not, call the play function async
if (!m_play_called)
{
m_play_called = true;
QMetaObject::invokeMethod(this, "play", Qt::QueuedConnection);
}
}
void AudioOutput::play()
{
//Set that last async call was triggered
m_play_called = false;
if (m_buffer.isEmpty())
{
//If data is empty set that nothing should be played
//until the buffer has at least the minimum buffered size already set
m_buffer_requested = true;
return;
}
else if (m_buffer.size() < m_size_to_buffer)
{
//If buffer doesn't contains enough data,
//check if exists a already flag telling that the buffer comes
//from a empty state and should not play anything until have the minimum data size
if (m_buffer_requested)
return;
}
else
{
//Buffer is ready and data can be played
m_buffer_requested = false;
}
int readlen = m_audio_output->periodSize();
int chunks = m_audio_output->bytesFree() / readlen;
//Play data while it's available in the output device
while (chunks)
{
//Get chunk from the buffer
QByteArray samples = m_buffer.mid(0, readlen);
int len = samples.size();
m_buffer.remove(0, len);
//Write data to the output device
if (len)
m_device->write(samples);
//If chunk is smaller than the output chunk size, exit loop
if (len != readlen)
break;
//Decrease the available number of chunks
chunks--;
}
}
AudioRecorder.cpp:
#include "audiorecorder.h"
AudioRecorder::AudioRecorder(const QString &name, const QAudioFormat &format, QObject *parent) : QFile(name, parent), format(format)
{
}
AudioRecorder::~AudioRecorder()
{
if (!isOpen())
return;
close();
}
bool AudioRecorder::hasSupportedFormat()
{
return (format.sampleSize() == 8
&& format.sampleType() == QAudioFormat::UnSignedInt)
|| (format.sampleSize() > 8
&& format.sampleType() == QAudioFormat::SignedInt
&& format.byteOrder() == QAudioFormat::LittleEndian);
}
bool AudioRecorder::open()
{
if (!hasSupportedFormat())
{
setErrorString("Wav PCM supports only 8-bit unsigned samples "
"or 16-bit (or more) signed samples (in little endian)");
return false;
}
else
{
if (!QFile::open(ReadWrite | Truncate))
return false;
writeHeader();
return true;
}
}
qint64 AudioRecorder::write(const QByteArray &data)
{
return QFile::write(data);
}
void AudioRecorder::writeHeader()
{
QDataStream out(this);
out.setByteOrder(QDataStream::LittleEndian);
// RIFF chunk
out.writeRawData("RIFF", 4);
out << quint32(0); // Placeholder for the RIFF chunk size (filled by close())
out.writeRawData("WAVE", 4);
// Format description chunk
out.writeRawData("fmt ", 4);
out << quint32(16); // "fmt " chunk size (always 16 for PCM)
out << quint16(1); // data format (1 => PCM)
out << quint16(format.channelCount());
out << quint32(format.sampleRate());
out << quint32(format.sampleRate() * format.channelCount()
* format.sampleSize() / 8 ); // bytes per second
out << quint16(format.channelCount() * format.sampleSize() / 8); // Block align
out << quint16(format.sampleSize()); // Significant Bits Per Sample
// Data chunk
out.writeRawData("data", 4);
out << quint32(0); // Placeholder for the data chunk size (filled by close())
Q_ASSERT(pos() == 44); // Must be 44 for WAV PCM
}
void AudioRecorder::close()
{
// Fill the header size placeholders
quint32 fileSize = size();
QDataStream out(this);
// Set the same ByteOrder like in writeHeader()
out.setByteOrder(QDataStream::LittleEndian);
// RIFF chunk size
seek(4);
out << quint32(fileSize - 8);
// data chunk size
seek(40);
out << quint32(fileSize - 44);
QFile::close();
}
main.cpp:
#include <QtCore>
#include "audiooutput.h"
#include "audiorecorder.h"
#include <signal.h>

QByteArray tone_generator()
{
    //Tone generator from http://www.cplusplus.com/forum/general/129827/
    const unsigned int samplerate = 8000;
    const unsigned short channels = 1;

    const double pi = M_PI;
    const qint16 amplitude = std::numeric_limits<qint16>::max() * 0.5;

    const unsigned short n_frequencies = 8;
    const unsigned short n_seconds_each = 1;

    float frequencies[n_frequencies] = {55.0, 110.0, 220.0, 440.0, 880.0, 1760.0, 3520.0, 7040.0};

    const int n_samples = channels * samplerate * n_frequencies * n_seconds_each;

    QVector<qint16> data;
    data.resize(n_samples);

    int index = n_samples / n_frequencies;

    for (unsigned short i = 0; i < n_frequencies; ++i)
    {
        float freq = frequencies[i];
        double d = (samplerate / freq);
        int c = 0;

        for (int j = index * i; j < index * (i + 1); j += 2)
        {
            double deg = 360.0 / d;
            data[j] = data[j + (channels - 1)] = qSin((c++ * deg) * pi / 180.0) * amplitude;
        }
    }

    return QByteArray((char*)data.data(), data.size() * sizeof(qint16));
}

void signalHandler(int signum)
{
    qDebug().nospace() << "Interrupt signal (" << signum << ") received.";
    qApp->exit();
}

int main(int argc, char *argv[])
{
    //Handle console close to ensure destructors are called
#ifdef Q_OS_WIN
    signal(SIGBREAK, signalHandler);
#else
    signal(SIGHUP, signalHandler);
#endif
    signal(SIGINT, signalHandler);

    QCoreApplication a(argc, argv);

    QAudioFormat format;
    format.setSampleRate(8000);
    format.setChannelCount(1);
    format.setSampleSize(16);
    format.setCodec("audio/pcm");
    format.setByteOrder(QAudioFormat::LittleEndian);
    format.setSampleType(QAudioFormat::SignedInt);

    AudioOutput output;
    AudioRecorder file("tone.wav", format);

    if (!output.start(QAudioDeviceInfo::defaultOutputDevice(), format, 10 * 1000)) //10 seconds of buffer
        return a.exec();

    if (!file.open())
    {
        qDebug() << qPrintable(file.errorString());
        return a.exec();
    }

    qDebug() << "Started!";

    QByteArray audio_data = tone_generator();

    QTimer timer;
    QObject::connect(&timer, &QTimer::timeout, [&]{
        qDebug() << "Writing" << audio_data.size() << "bytes";
        output.write(audio_data);
        file.write(audio_data);
    });

    qDebug() << "Writing" << audio_data.size() << "bytes";
    output.write(audio_data);
    file.write(audio_data);

    timer.start(8000); //8 seconds because we generated 8 seconds of sound

    return a.exec();
}
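To connect this back to the original question, here is a hedged sketch of an adaptation (it assumes the Ethernet device delivers raw PCM in UDP datagrams, which the question does not actually specify). Inside main(), replace the tone generator and the QTimer with a QUdpSocket (QtNetwork module) and forward every datagram to output.write() and file.write():

    // Hypothetical adaptation (needs #include <QtNetwork> and QT += network).
    // The port number 5000 is only an example.
    QUdpSocket socket;
    socket.bind(QHostAddress::AnyIPv4, 5000);

    QObject::connect(&socket, &QUdpSocket::readyRead, [&]{
        while (socket.hasPendingDatagrams())
        {
            QByteArray packet;
            packet.resize(int(socket.pendingDatagramSize()));
            socket.readDatagram(packet.data(), packet.size());

            output.write(packet); // play through AudioOutput
            file.write(packet);   // and append to tone.wav
        }
    });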