This article describes how to get audio data from the microphone using the HTML5 AudioContext API; it should be a useful reference for anyone facing the same problem.
Problem description
I'm trying to get a stream of data from my microphone (e.g. volume, pitch). So far I've been using getUserMedia to access my microphone audio, but I couldn't find a way to extract the data from it.
My code:
$(function () {
    var audioContext = new AudioContext();
    var audioInput = null,
        realAudioInput = null,
        inputPoint = null,
        analyserNode = null;

    if (!navigator.getUserMedia)
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
                                 navigator.mozGetUserMedia || navigator.msGetUserMedia;

    if (navigator.getUserMedia) {
        navigator.getUserMedia({audio: true}, success, function (e) {
            alert('Error capturing audio.');
        });
    } else alert('getUserMedia not supported in this browser.');

    function success(stream) {
        inputPoint = audioContext.createGain();
        realAudioInput = audioContext.createMediaStreamSource(stream);
        audioInput = realAudioInput;
        audioInput.connect(inputPoint);
        analyserNode = audioContext.createAnalyser();
        analyserNode.fftSize = 2048;
        inputPoint.connect(analyserNode);
    }

    function live() {
        requestAnimationFrame(live);
        var freqByteData = new Uint8Array(analyserNode.frequencyBinCount);
        analyserNode.getByteFrequencyData(freqByteData);
        console.log(analyserNode.frequencyBinCount);
    }
});
Recommended answer
Here is a version of your code which does two things:
- it retrieves the raw PCM audio buffer from the live microphone and sends it to console.log (to see the JavaScript console hit ctrl-shift-i); this is a trace of the streaming microphone audio data in the time domain
- it also runs the same audio data through an FFT (Fast Fourier Transform), likewise sent to console.log, which is the frequency-domain representation
NOTE - either wear headphones OR turn down your speaker volume, otherwise you will hear the squeal of audio feedback, since the mic will pick up the speaker audio a la Jimi Hendrix!
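As an aside, the callback-style navigator.getUserMedia used in the listing below has since been deprecated in favour of the promise-based navigator.mediaDevices.getUserMedia. A minimal sketch of the modern entry point, assuming the same start_microphone() wiring as in the listing:

    // modern, promise-based microphone access (replaces navigator.getUserMedia)
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(function (stream) {
            start_microphone(stream); // identical graph wiring from here on
        })
        .catch(function (e) {
            alert('Error capturing audio.');
        });

The full page from the answer: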
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone then show time & frequency domain output</title>

<script type="text/javascript">

var webaudio_tooling_obj = function () {

    var audioContext = new AudioContext();

    console.log("audio is starting up ...");

    var BUFF_SIZE_RENDERER = 16384;

    var audioInput = null,
        microphone_stream = null,
        gain_node = null,
        script_processor_node = null,
        script_processor_analysis_node = null,
        analyser_node = null;

    if (!navigator.getUserMedia)
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
                                 navigator.mozGetUserMedia || navigator.msGetUserMedia;

    if (navigator.getUserMedia) {
        navigator.getUserMedia({audio: true},
            function (stream) {
                start_microphone(stream);
            },
            function (e) {
                alert('Error capturing audio.');
            }
        );
    } else { alert('getUserMedia not supported in this browser.'); }

    // ---

    function show_some_data(given_typed_array, num_row_to_display, label) {

        var size_buffer = given_typed_array.length;
        var index = 0;

        console.log("__________ " + label);

        if (label === "time") {
            for (; index < num_row_to_display && index < size_buffer; index += 1) {
                // convert unsigned byte (0..255) to float PCM in the range -1.0 .. +1.0
                var curr_value_time = (given_typed_array[index] / 128) - 1.0;
                console.log(curr_value_time);
            }
        } else if (label === "frequency") {
            for (; index < num_row_to_display && index < size_buffer; index += 1) {
                console.log(given_typed_array[index]);
            }
        } else {
            throw new Error("ERROR - must pass time or frequency");
        }
    }

    function process_microphone_buffer(event) {
        // raw PCM samples from the mic as 32-bit floats - just mono (1 channel) for now;
        // this is where you could copy the data into your own aggregate buffer or file
        var microphone_output_buffer = event.inputBuffer.getChannelData(0);
    }

    function start_microphone(stream) {

        gain_node = audioContext.createGain();
        gain_node.connect(audioContext.destination);

        microphone_stream = audioContext.createMediaStreamSource(stream);
        microphone_stream.connect(gain_node);

        script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE_RENDERER, 1, 1);
        script_processor_node.onaudioprocess = process_microphone_buffer;

        microphone_stream.connect(script_processor_node);

        // --- enable volume control for output speakers

        document.getElementById('volume').addEventListener('change', function () {
            var curr_volume = this.value;
            gain_node.gain.value = curr_volume;
            console.log("curr_volume ", curr_volume);
        });

        // --- setup FFT

        // a ScriptProcessorNode must be connected into the graph
        // for its onaudioprocess callback to keep firing
        script_processor_analysis_node = audioContext.createScriptProcessor(2048, 1, 1);
        script_processor_analysis_node.connect(gain_node);

        analyser_node = audioContext.createAnalyser();
        analyser_node.smoothingTimeConstant = 0;
        analyser_node.fftSize = 2048;

        microphone_stream.connect(analyser_node);
        analyser_node.connect(script_processor_analysis_node);

        var buffer_length = analyser_node.frequencyBinCount;

        var array_freq_domain = new Uint8Array(buffer_length);
        var array_time_domain = new Uint8Array(buffer_length);

        console.log("buffer_length " + buffer_length);

        script_processor_analysis_node.onaudioprocess = function () {

            // pull the current frequency- and time-domain snapshots from the analyser
            analyser_node.getByteFrequencyData(array_freq_domain);
            analyser_node.getByteTimeDomainData(array_time_domain);

            show_some_data(array_freq_domain, 5, "frequency");
            show_some_data(array_time_domain, 5, "time"); // store this to record to aggregate buffer/file
        };
    }

}(); //  webaudio_tooling_obj = function()

</script>

</head>
<body>

    <p>Volume</p>
    <input id="volume" type="range" min="0" max="1" step="0.1" value="0.5"/>

</body>
</html>
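To get the volume and pitch the question asks about, you can post-process the same analyser arrays. Below is a minimal sketch (not part of the original answer), assuming it is called where analyser_node, array_time_domain and array_freq_domain from the listing above are in scope, e.g. inside the onaudioprocess callback: the RMS of the time-domain samples gives a volume estimate, and the loudest FFT bin gives a crude pitch estimate (reasonable for a single clear tone, not robust for voices or music):

    function estimate_volume_and_pitch() {
        // volume: root-mean-square of the time-domain samples, rescaled to -1.0 .. +1.0
        var sum_squares = 0;
        for (var i = 0; i < array_time_domain.length; i++) {
            var sample = (array_time_domain[i] / 128) - 1.0;
            sum_squares += sample * sample;
        }
        var rms = Math.sqrt(sum_squares / array_time_domain.length);

        // pitch (crude): frequency of the loudest FFT bin;
        // bin k covers k * sampleRate / fftSize Hz
        var peak_index = 0;
        for (var k = 1; k < array_freq_domain.length; k++) {
            if (array_freq_domain[k] > array_freq_domain[peak_index]) peak_index = k;
        }
        var peak_hz = peak_index * audioContext.sampleRate / analyser_node.fftSize;

        console.log("volume (rms) ", rms, " ~pitch (Hz) ", peak_hz);
    }

Calling it inside onaudioprocess reuses the arrays already filled by getByteTimeDomainData / getByteFrequencyData, so no extra analyser calls are needed.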
That concludes this article on how to get audio data from the microphone using the HTML5 AudioContext. We hope the recommended answer is helpful.