I am trying to increase the microphone gain for a VOIP scenario.
I am using PortAudio to capture the input stream (sample type paFloat32), multiplying the samples by a floating-point factor, and passing the resulting stream to the output device.
Note: I pass it to a virtual output device that is automatically redirected to a virtual input device (program: VB-Cable), which the VOIP application can then use as its microphone input, with the gain applied.
I would like to know whether there is a better way to increase the signal gain while preserving quality.
I have read that such gain calculations are best done by first converting the input to a higher-precision format, performing the gain multiplication in that format, applying clipping, and then converting back to the original format.
I am not sure how to do this with PortAudio's paFloat32 type; my attempt is commented out in the source below. When I enable it, there is a noticeable noise problem even with the gain set to 1.
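For reference, this is roughly the kind of conversion I have in mind, sketched here for 16-bit samples (a sketch only, with a hypothetical helper name; my actual paFloat32 attempt is the commented-out block in the code below):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Sketch: widen each 16-bit sample, apply the gain in the wider type,
// clamp to the int16 range, then narrow back to 16 bits.
static void applyGainInt16(int16_t* samples, std::size_t count, double gain)
{
    for (std::size_t i = 0; i < count; ++i) {
        double widened = static_cast<double>(samples[i]) * gain;
        widened = std::min<double>(widened, INT16_MAX);
        widened = std::max<double>(widened, INT16_MIN);
        samples[i] = static_cast<int16_t>(widened);
    }
}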
Dependencies: tinycon, PortAudio
Compile: g++ main.cpp tinycon.cpp -o main -L./ -lcygportaudio-2 -lrt -lm -pthread -std=c++11
Code:
#include "portaudio.h"
#include <iostream>
#include <chrono>
#include <thread>
#include <mutex>
#include "tinycon.h"
#define SAMPLE_RATE (44100)
#define FRAMES_PER_BUFFER (441)
#define DITHER_FLAG (1)
#define PA_SAMPLE_TYPE paFloat32
#define SAMPLE_SIZE (4)
#define SAMPLE_SILENCE (0)
#define PRINTF_S_FORMAT "%f"
/*******************************************************************/
double multiplier = 1.0;
double multiplierStep = 0.1;
int main(int argc, char **argv);
int xrun(PaStream *stream, int err, char* sampleBlock);
void error1(PaStream *stream, char* sampleBlock);
void error2(PaStream *stream, int err);
void listDevices();
// Use tinycon and a second thread for non blocking input
class tcon : public tinyConsole
{
public:
tcon (std::string s): tinyConsole(s) {;}
int hotkeys(char c)
{
if (c == 's') {
if (multiplier >= (0+multiplierStep)) {
multiplier -= multiplierStep;
}
printf( "Multiplier: %f\n", multiplier );
return 1;
}
if (c == 'w') {
multiplier += multiplierStep;
printf( "Multiplier: %f\n", multiplier );
return 1;
}
return 0;
}
};
int inputThread() {
tcon tc (std::string(""));
tc.run();
return 0;
}
void listDevices() {
int i, numDevices, defaultDisplayed;
const PaDeviceInfo *deviceInfo;
Pa_Initialize();
numDevices = Pa_GetDeviceCount();
printf( "Number of devices = %d\n", numDevices );
int isInputDevice = 0;
for( i=0; i<numDevices; i++ )
{
deviceInfo = Pa_GetDeviceInfo( i );
int isInputDevice = (deviceInfo->maxInputChannels > 0);
printf( "%sDeviceID: %d, Name: %s\n", (isInputDevice ? "Input" : "Output"), i, deviceInfo->name);
}
fprintf (stderr, "Press any key to close\n");
getch();
}
int main (int argc, char **argv)
{
int c;
int inputDeviceId = -1;
int outputDeviceId = -1;
opterr = 0;
const char* helpMessage =
"-h : show this help message\n"
"-i <int> : select the INPUT DEVICE by id\n"
"-o <int> : select the OUPUT DEVICE by id\n"
"-m <double> : SIGNAL MULTIPLIER\n"
"-s <double> : SIGNAL MULTIPLIER STEP (press w or s while console focused to go up and down by this ammount.\n"
"-d : list devices\n";
while ((c = getopt (argc, argv, "i:o:m:s:hd")) != -1) {
switch (c) {
case 'i':
inputDeviceId = atoi(optarg);
break;
case 'o':
outputDeviceId = atoi(optarg);
break;
case 'm':
multiplier = atof(optarg);
break;
case 's':
multiplierStep = atof(optarg);
break;
case 'd':
listDevices();
return 0;
case '?':
if (isprint (optopt))
fprintf (stderr, "Unknown option `-%c'.\n", optopt);
else
fprintf (stderr, "Unknown option character `\\x%x'.\n", optopt);
case 'h':
fprintf (stderr, helpMessage);
fprintf (stderr, "Press any key to close\n");
getch();
return 1;
default:
abort ();
}
}
// Start non blocking input thread
std::thread nonBlockingInputThread(inputThread);
PaStreamParameters inputParameters, outputParameters;
PaStream *stream = NULL;
PaError err;
const PaDeviceInfo* inputInfo;
const PaDeviceInfo* outputInfo;
char *sampleBlock = NULL;
int i;
int numBytes;
int numChannels;
err = Pa_Initialize();
if( err != paNoError ) error2(stream, err);
inputParameters.device = (inputDeviceId == -1) ? Pa_GetDefaultInputDevice() : inputDeviceId; /* default input device */
inputInfo = Pa_GetDeviceInfo( inputParameters.device );
outputParameters.device = (outputDeviceId == -1) ? Pa_GetDefaultOutputDevice() : outputDeviceId; /* default output device */
outputInfo = Pa_GetDeviceInfo( outputParameters.device );
numChannels = inputInfo->maxInputChannels < outputInfo->maxOutputChannels
? inputInfo->maxInputChannels : outputInfo->maxOutputChannels;
inputParameters.channelCount = numChannels;
inputParameters.sampleFormat = PA_SAMPLE_TYPE;
inputParameters.suggestedLatency = inputInfo->defaultHighInputLatency ;
inputParameters.hostApiSpecificStreamInfo = NULL;
printf( "Input device # %d.\n", inputParameters.device );
printf( " Name: %s\n", inputInfo->name );
outputParameters.channelCount = numChannels;
outputParameters.sampleFormat = PA_SAMPLE_TYPE;
outputParameters.suggestedLatency = outputInfo->defaultHighOutputLatency;
outputParameters.hostApiSpecificStreamInfo = NULL;
printf( "Output device # %d.\n", outputParameters.device );
printf( " Name: %s\n", outputInfo->name );
/* -- setup -- */
err = Pa_OpenStream(
&stream,
&inputParameters,
&outputParameters,
SAMPLE_RATE,
FRAMES_PER_BUFFER,
paClipOff, /* we won't output out of range samples so don't bother clipping them */
NULL, /* no callback, use blocking API */
NULL ); /* no callback, so no callback userData */
if( err != paNoError ) error2(stream, err);
numBytes = FRAMES_PER_BUFFER * numChannels * SAMPLE_SIZE ;
sampleBlock = (char *) malloc( numBytes );
if( sampleBlock == NULL )
{
printf("Could not allocate record array.\n");
error1(stream, sampleBlock);
}
err = Pa_StartStream( stream );
if( err != paNoError ) error1(stream, sampleBlock);
while (1) {
// You may get underruns or overruns if the output is not primed by PortAudio.
err = Pa_ReadStream( stream, sampleBlock, FRAMES_PER_BUFFER );
if( err ) xrun(stream, err, sampleBlock);
int blockIndex;
float* sampleBlockShort = (float*)sampleBlock;
for (blockIndex = 0; blockIndex < FRAMES_PER_BUFFER; blockIndex++) {
/*
double dSample = (double)sampleBlockShort[blockIndex];
dSample *= multiplier;
if (dSample > 32767.0) dSample = 32767.0;
if (dSample < -32768.0) dSample = -32768.0;
sampleBlockShort[blockIndex] = (short)dSample;
*/
sampleBlockShort[blockIndex] *= multiplier;
}
err = Pa_WriteStream( stream, sampleBlock, FRAMES_PER_BUFFER );
if( err ) xrun(stream, err, sampleBlock);
}
printf("Wire off.\n"); fflush(stdout);
err = Pa_StopStream( stream );
if( err != paNoError ) error1(stream, sampleBlock);
free( sampleBlock );
Pa_Terminate();
return 0;
}
int xrun(PaStream *stream, int err, char* sampleBlock) {
printf("err = %d\n", err); fflush(stdout);
if( stream ) {
Pa_AbortStream( stream );
Pa_CloseStream( stream );
}
free( sampleBlock );
Pa_Terminate();
if( err & paInputOverflow )
fprintf( stderr, "Input Overflow.\n" );
if( err & paOutputUnderflow )
fprintf( stderr, "Output Underflow.\n" );
return -2;
}
void error1(PaStream *stream, char* sampleBlock) {
free( sampleBlock );
exit(-1);
}
void error2(PaStream *stream, int err) {
if( stream ) {
Pa_AbortStream( stream );
Pa_CloseStream( stream );
}
Pa_Terminate();
fprintf( stderr, "An error occured while using the portaudio stream\n" );
fprintf( stderr, "Error number: %d\n", err );
fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
exit(-1);
}
Best answer
I found that you can also use the webrtc library for this. It comes with noise suppression, which is very handy. I don't understand what compression_gain_db and target_level_dbfs actually do, but setting them to their highest values seems to apply the most gain. Working in int16 as @alexander suggested fixed many of the problems with my custom solution, and fixing the loop so it iterates over the whole buffer also helped. Both the webrtc solution and my own play back in real time with the sample code below.
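Pulled out of the full listing that follows, this is the core of the webrtc part in isolation (same calls and variables as in the program below; numChannels, sampleBlock, and numBytes are set up exactly as in the full source):

// Condensed from the listing below: fixed-digital AGC plus noise suppression
// applied to one 10 ms block of int16 samples.
webrtc::AudioProcessing* apm = webrtc::AudioProcessing::Create();
apm->high_pass_filter()->Enable(true);
apm->noise_suppression()->set_level(webrtc::NoiseSuppression::Level::kHigh);
apm->noise_suppression()->Enable(true);
apm->gain_control()->set_mode(webrtc::GainControl::kFixedDigital);
apm->gain_control()->set_compression_gain_db(compression_gain_db);
apm->gain_control()->set_target_level_dbfs(target_level_dbfs);
apm->gain_control()->Enable(true);

webrtc::AudioFrame frame;
frame.num_channels_ = numChannels;
frame.sample_rate_hz_ = SAMPLE_RATE;            // 32000 in this program
frame.samples_per_channel_ = FRAMES_PER_BUFFER; // 320 == 10 ms at 32 kHz
memcpy(frame.data_, sampleBlock, numBytes);
apm->ProcessStream(&frame);
memcpy(sampleBlock, frame.data_, numBytes);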
The full code sample is included below.
$ ./main.exe -h
-h : show this help message
-i <int> : select the INPUT DEVICE by id
-o <int> : select the OUTPUT DEVICE by id
-c <int [0,90]> : compression_gain_db
-t <int [0, 31]> : target_level_dbfs
-g <0 or 1> : toggle webrtc gain control on and off (1 by default)
-k <0 or 1> : toggle custom gain control on and off (1 by default)
-f <int [1, maxInt]> : customGainControlFactor
-q <int [0, 3]> : webrtc noise suppression level, high is more suppression
-e <0 or 1> : toggle webrtc noise suppression on and off (1 by default)
-d : list devices
Real time controls:
compression_gain_db UP_KEY='a' DOWN_KEY='s'
target_dbfs_level UP_KEY='d' DOWN_KEY='f'
webrtcGainControlEnabled TOGGLE_KEY='g'
webrtcNoiseSuppressionLevel UP_KEY='q' DOWN_KEY='w'
webrtcNoiseSuppressionEnabled TOGGLE_KEY='e'
customGainFactor UP_KEY='h' DOWN_KEY='j'
customGainFactorEnabled TOGGLE_KEY='k'
Press any key to close
Dependencies:
tinycon, PortAudio, libwebrtc-audio-processing-devel
Note: I am using cygwin; if you have trouble getting libwebrtc to work, see here
Compile:
g++ main.cpp tinycon.cpp -o main -L./ -lcygportaudio-2 -lrt -lm -pthread -I/usr/include/webrtc_audio_processing/ -DWEBRTC_WIN -DWEBRTC
Main program
#include "portaudio.h"
#include <iostream>
#include <limits>
#include <chrono>
#include <thread>
#include <mutex>
#include "tinycon.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/include/trace.h"
using webrtc::AudioProcessing;
using webrtc::AudioFrame;
using webrtc::GainControl;
using webrtc::NoiseSuppression;
#define SAMPLE_RATE (32000)
#define FRAMES_PER_BUFFER (320)
#define DITHER_FLAG (0)
#define PA_SAMPLE_TYPE paInt16
#define SAMPLE_SIZE (2)
#define SAMPLE_SILENCE (0)
#define PRINTF_S_FORMAT "%d"
/*******************************************************************/
int customGainFactor = 1;
int customGainFactorStep = 1;
bool customGainControlEnabled = true;
int compression_gain_db = 1;
int compression_gain_dbStep = 1;
int target_level_dbfs = 1;
int target_level_dbfsStep = 1;
bool webrtcGainControlEnabled = true;
bool webrtcNoiseSuppressionEnabled = true;
int webrtcNoiseSuppressionLevel = 1;
int main(int argc, char **argv);
int xrun(PaStream *stream, int err, char* sampleBlock);
void error1(PaStream *stream, char* sampleBlock);
void error2(PaStream *stream, int err);
void listDevices();
webrtc::NoiseSuppression::Level webrtcNoiseSuppressionLevelToEnum(int level);
// Use tinycon and a second thread for non blocking input
class tcon : public tinyConsole
{
public:
tcon (std::string s): tinyConsole(s) {;}
int hotkeys(char c)
{
if (c == 'a') {
if (compression_gain_db >= (0+compression_gain_dbStep)) {
compression_gain_db -= compression_gain_dbStep;
}
printf( "Compression_gain_db: %d\n", compression_gain_db );
return 1;
}
if (c == 's') {
if (compression_gain_db <= (90-compression_gain_dbStep)) {
compression_gain_db += compression_gain_dbStep;
}
printf( "Compression_gain_db: %d\n", compression_gain_db );
return 1;
}
if (c == 'd') {
if (target_level_dbfs >= (0+target_level_dbfsStep)) {
target_level_dbfs -= target_level_dbfsStep;
}
printf( "target_level_dbfs: %d\n", target_level_dbfs );
return 1;
}
if (c == 'f') {
if (target_level_dbfs <= (31-target_level_dbfsStep)) {
target_level_dbfs += target_level_dbfsStep;
}
printf( "target_level_dbfs: %d\n", target_level_dbfs );
return 1;
}
if (c == 'g') {
webrtcGainControlEnabled = !webrtcGainControlEnabled;
printf("webrtcGainControlEnabled: %s\n", (webrtcGainControlEnabled) ? "true" : "false");
return 1;
}
if (c == 'h') {
if (customGainFactor >= (1+customGainFactorStep)) {
customGainFactor -= customGainFactorStep;
}
printf( "customGainFactor: %d\n", customGainFactor );
return 1;
}
if (c == 'j') {
customGainFactor += customGainFactorStep;
printf( "customGainFactor: %d\n", customGainFactor );
return 1;
}
if (c == 'k') {
customGainControlEnabled = !customGainControlEnabled;
printf("customGainControlEnabled: %s\n", (customGainControlEnabled) ? "true" : "false");
return 1;
}
if (c == 'q') {
if (webrtcNoiseSuppressionLevel <= (3-1)) {
webrtcNoiseSuppressionLevel += 1;
}
printf( "webrtcNoiseSuppressionLevel: %d\n", webrtcNoiseSuppressionLevel );
return 1;
}
if (c == 'w') {
if (webrtcNoiseSuppressionLevel >= (0+1)) {
webrtcNoiseSuppressionLevel -= 1;
}
printf( "webrtcNoiseSuppressionLevel: %d\n", webrtcNoiseSuppressionLevel );
return 1;
}
if (c == 'e') {
webrtcNoiseSuppressionEnabled = !webrtcNoiseSuppressionEnabled;
printf("webrtcNoiseSuppressionEnabled: %s\n", (webrtcNoiseSuppressionEnabled) ? "true" : "false");
return 1;
}
return 0;
}
};
int inputThread() {
tcon tc (std::string(""));
tc.run();
return 0;
}
void listDevices() {
int i, numDevices, defaultDisplayed;
const PaDeviceInfo *deviceInfo;
Pa_Initialize();
numDevices = Pa_GetDeviceCount();
printf( "Number of devices = %d\n", numDevices );
int isInputDevice = 0;
for( i=0; i<numDevices; i++ )
{
deviceInfo = Pa_GetDeviceInfo( i );
int isInputDevice = (deviceInfo->maxInputChannels > 0);
printf( "%sDeviceID: %d, Name: %s\n", (isInputDevice ? "Input" : "Output"), i, deviceInfo->name);
}
fprintf (stderr, "Press any key to close\n");
getch();
}
int main (int argc, char **argv)
{
int c;
int inputDeviceId = -1;
int outputDeviceId = -1;
opterr = 0;
const char* helpMessage =
"-h : show this help message\n"
"-i <int> : select the INPUT DEVICE by id\n"
"-o <int> : select the OUPUT DEVICE by id\n"
"-c <int [0,90]> : compression_gain_db\n"
"-t <int [0, 31]> : target_level_dbfs\n"
"-g <0 or 1> : toggle webrtc gain control on and off (1 by default)\n"
"-k <0 or 1> : toggle custom gain control on and off (1 by default)\n"
"-f <int [1, maxInt]> : customGainControlFactor\n"
"-q <int [0, 5]> : webrtc noise supression level, high is more suppression\n"
"-e <0 or 1> : toggle webrtc noise suppression on and off (1 by default)\n"
"-d : list devices\n"
"\n"
"Real time controls:\n"
"compression_gain_db UP_KEY='a' DOWN_KEY='s'\n"
"target_dbfs_level UP_KEY='d' DOWN_KEY='f'\n"
"webrtcGainControlEnabled TOGGLE_KEY='g'\n"
"webrtcNoiseSuppressionLevel UP_KEY='q' DOWN_KEY='w'\n"
"webrtcNoiseSuppressionEnabled TOGGLE_KEY='e'\n"
"customGainFactor UP_KEY='h' DOWN_KEY='j'\n"
"customGainFactorEnabled TOGGLE_KEY='k'\n";
while ((c = getopt (argc, argv, "i:o:c:t:g:k:f:q:e:hd")) != -1) {
switch (c) {
case 'i':
inputDeviceId = atoi(optarg);
break;
case 'o':
outputDeviceId = atoi(optarg);
break;
case 'c':
compression_gain_db = atoi(optarg);
break;
case 't':
target_level_dbfs = atoi(optarg);
break;
case 'g':
webrtcGainControlEnabled = (atoi(optarg) == 1) ? true : false;
break;
case 'f':
customGainFactor = atoi(optarg);
break;
case 'k':
customGainControlEnabled = (atoi(optarg) == 1) ? true : false;
break;
case 'q':
webrtcNoiseSuppressionLevel = atoi(optarg);
break;
case 'e':
webrtcNoiseSuppressionEnabled = (atoi(optarg) == 1) ? true : false;
break;
case 'd':
listDevices();
return 0;
case '?':
if (isprint (optopt))
fprintf (stderr, "Unknown option `-%c'.\n", optopt);
else
fprintf (stderr, "Unknown option character `\\x%x'.\n", optopt);
case 'h':
fprintf (stderr, helpMessage);
fprintf (stderr, "Press any key to close\n");
getch();
return 1;
default:
abort ();
}
}
// Start non blocking input thread
std::thread nonBlockingInputThread(inputThread);
PaStreamParameters inputParameters, outputParameters;
PaStream *stream = NULL;
PaError err;
const PaDeviceInfo* inputInfo;
const PaDeviceInfo* outputInfo;
char *sampleBlock = NULL;
int i;
int numBytes;
int numChannels;
err = Pa_Initialize();
if( err != paNoError ) error2(stream, err);
inputParameters.device = (inputDeviceId == -1) ? Pa_GetDefaultInputDevice() : inputDeviceId; /* default input device */
inputInfo = Pa_GetDeviceInfo( inputParameters.device );
outputParameters.device = (outputDeviceId == -1) ? Pa_GetDefaultOutputDevice() : outputDeviceId; /* default output device */
outputInfo = Pa_GetDeviceInfo( outputParameters.device );
numChannels = inputInfo->maxInputChannels < outputInfo->maxOutputChannels
? inputInfo->maxInputChannels : outputInfo->maxOutputChannels;
inputParameters.channelCount = numChannels;
inputParameters.sampleFormat = PA_SAMPLE_TYPE;
inputParameters.suggestedLatency = inputInfo->defaultHighInputLatency ;
inputParameters.hostApiSpecificStreamInfo = NULL;
printf( "Input device # %d.\n", inputParameters.device );
printf( " Name: %s\n", inputInfo->name );
outputParameters.channelCount = numChannels;
outputParameters.sampleFormat = PA_SAMPLE_TYPE;
outputParameters.suggestedLatency = outputInfo->defaultHighOutputLatency;
outputParameters.hostApiSpecificStreamInfo = NULL;
printf( "Output device # %d.\n", outputParameters.device );
printf( " Name: %s\n", outputInfo->name );
/* -- setup -- */
err = Pa_OpenStream(
&stream,
&inputParameters,
&outputParameters,
SAMPLE_RATE,
FRAMES_PER_BUFFER,
paClipOff, /* we won't output out of range samples so don't bother clipping them */
NULL, /* no callback, use blocking API */
NULL ); /* no callback, so no callback userData */
if( err != paNoError ) error2(stream, err);
numBytes = FRAMES_PER_BUFFER * numChannels * SAMPLE_SIZE ;
sampleBlock = (char *) malloc( numBytes );
if( sampleBlock == NULL )
{
printf("Could not allocate record array.\n");
error1(stream, sampleBlock);
}
// Configure webrtc::audioprocessing
int webrtcErr;
AudioProcessing* apm = AudioProcessing::Create();
apm->high_pass_filter()->Enable(true);
apm->noise_suppression()->set_level(webrtcNoiseSuppressionLevelToEnum(webrtcNoiseSuppressionLevel));
apm->noise_suppression()->Enable(webrtcNoiseSuppressionEnabled);
apm->gain_control()->set_mode(apm->gain_control()->kFixedDigital);
apm->gain_control()->set_compression_gain_db(compression_gain_db);
apm->gain_control()->set_target_level_dbfs(target_level_dbfs);
apm->gain_control()->Enable(webrtcGainControlEnabled);
err = Pa_StartStream( stream );
if( err != paNoError ) error1(stream, sampleBlock);
while (1) {
// You may get underruns or overruns if the output is not primed by PortAudio.
err = Pa_ReadStream( stream, sampleBlock, FRAMES_PER_BUFFER );
if( err ) xrun(stream, err, sampleBlock);
// Run custom gain solution
if (customGainControlEnabled) {
int blockIndex;
short* sampleBlockShort = (short*)sampleBlock;
for (blockIndex = 0; blockIndex < FRAMES_PER_BUFFER*numChannels; blockIndex++) {
int iSample = (int)sampleBlockShort[blockIndex];
iSample *= customGainFactor;
iSample =
(iSample > std::numeric_limits<short>::max()) ? std::numeric_limits<short>::max()
: (iSample < std::numeric_limits<short>::min()) ? std::numeric_limits<short>::min()
: iSample;
sampleBlockShort[blockIndex] = (short)iSample;
}
}
// Apply webrtc gain and noise suppression
apm->noise_suppression()->set_level(webrtcNoiseSuppressionLevelToEnum(webrtcNoiseSuppressionLevel));
apm->noise_suppression()->Enable(webrtcNoiseSuppressionEnabled);
apm->gain_control()->set_compression_gain_db(compression_gain_db);
apm->gain_control()->set_target_level_dbfs(target_level_dbfs);
apm->gain_control()->Enable(webrtcGainControlEnabled);
webrtc::AudioFrame frame;
frame.num_channels_ = numChannels;
frame.sample_rate_hz_ = SAMPLE_RATE;
frame.samples_per_channel_ = FRAMES_PER_BUFFER;
memcpy(frame.data_, sampleBlock, numBytes);
if ((webrtcErr = apm->ProcessStream(&frame)) < 0) {
printf("Error Code: %d\n", webrtcErr); fflush(stdout);
return -1;
}
memcpy(sampleBlock, frame.data_, numBytes);
err = Pa_WriteStream( stream, sampleBlock, FRAMES_PER_BUFFER );
if( err ) xrun(stream, err, sampleBlock);
}
printf("Wire off.\n"); fflush(stdout);
err = Pa_StopStream( stream );
if( err != paNoError ) error1(stream, sampleBlock);
free( sampleBlock );
Pa_Terminate();
return 0;
}
int xrun(PaStream *stream, int err, char* sampleBlock) {
printf("err = %d\n", err); fflush(stdout);
if( stream ) {
Pa_AbortStream( stream );
Pa_CloseStream( stream );
}
free( sampleBlock );
Pa_Terminate();
if( err & paInputOverflow )
fprintf( stderr, "Input Overflow.\n" );
if( err & paOutputUnderflow )
fprintf( stderr, "Output Underflow.\n" );
return -2;
}
void error1(PaStream *stream, char* sampleBlock) {
free( sampleBlock );
exit(-1);
}
void error2(PaStream *stream, int err) {
if( stream ) {
Pa_AbortStream( stream );
Pa_CloseStream( stream );
}
Pa_Terminate();
fprintf( stderr, "An error occured while using the portaudio stream\n" );
fprintf( stderr, "Error number: %d\n", err );
fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
exit(-1);
}
webrtc::NoiseSuppression::Level webrtcNoiseSuppressionLevelToEnum(int level) {
switch (level) {
case 0 : return webrtc::NoiseSuppression::Level::kLow;
case 1 : return webrtc::NoiseSuppression::Level::kModerate;
case 2 : return webrtc::NoiseSuppression::Level::kHigh;
case 3 : return webrtc::NoiseSuppression::Level::kVeryHigh;
default: return webrtc::NoiseSuppression::Level::kModerate; // fall back for out-of-range values
}
}
Source: c++ - Programmatically increase microphone gain, on Stack Overflow: https://stackoverflow.com/questions/42748949/