音视频混流
主要用的API有:
1、创建一个BGM音频流
// Create an AudioContext instance — the entry point to the Web Audio API
var audioCtx = new AudioContext()
// Create a buffer source node that will play the decoded BGM data
var source = audioCtx.createBufferSource()
// Create a MediaStreamDestination node; audio routed into it is exposed
// as a MediaStream via its .stream property, which we merge in later
var track = audioCtx.createMediaStreamDestination()
ok,到这里为止,我们BGM的关键对象都齐全了,不过这些只有容器,没有最重要的内容。刚刚我们提到了decodeAudioData这个API,它可以将音频文件的ArrayBuffer数据解码为可播放的AudioBuffer。
// Fetch the BGM file as an ArrayBuffer and decode it into the source node.
var request = new XMLHttpRequest();
request.open('GET', './music/test.mp3', true);
request.responseType = 'arraybuffer';
request.onload = function() {
  // The response is the raw ArrayBuffer of the mp3 file
  var audioData = request.response;
  audioCtx.decodeAudioData(audioData, function(buffer) {
    // buffer is the decoded AudioBuffer; attach it to the source node
    source.buffer = buffer
    // Route the source into the MediaStreamDestination node
    source.connect(track)
    // Loop the BGM indefinitely
    source.loop = true;
    // Begin playback of the buffered audio from offset 0
    source.start(0)
  }, function(err) {
    // BUG FIX: the original call had no error callback, so a corrupt or
    // unsupported audio file would fail silently
    console.error('Error decoding audio data', err)
  })
}
// BUG FIX: report network failures instead of hanging silently
request.onerror = function() {
  console.error('Failed to load ./music/test.mp3')
}
request.send();
到这里为止,我们已经将BGM的音频捕获到了,接下来就是将其合进摄像头和麦克捕获的音视频流内。
2、捕获摄像头音视频流(这个在之前的文章里讲过,见传送门)
// Capture camera + microphone, then mix the BGM audio track into the stream.
navigator.mediaDevices.getUserMedia({
  audio: true,
  video: true
}).then(function(stream) {
  // stream is the captured camera/mic MediaStream.
  // track.stream is the MediaStream inside the MediaStreamDestination node;
  // getAudioTracks() returns its audio tracks — take the first (the BGM).
  var bgmAudioTrack = track.stream.getAudioTracks()[0]
  // Add the BGM audio track to the captured stream, mixing it in
  stream.addTrack(bgmAudioTrack)
}).catch(function(err) {
  // BUG FIX: the original chain had no rejection handler, so a denied
  // permission or missing device became an unhandled promise rejection
  console.error('getUserMedia failed', err)
})
现在我们获取到的就是混音之后的音视频了,之后就是将媒体导出本地了
3、下载多媒体(这个在之前的文章里讲过,见传送门)
var start = document.getElementById('start')
var stop = document.getElementById('stop')
// BUG FIX: the original declared `const chunks = null` and then reassigned
// it inside ondataavailable, which throws "Assignment to constant variable".
// It also kept only the LAST chunk. Use a mutable array and accumulate.
var chunks = []
// Create a MediaRecorder on the mixed stream (see the earlier article)
var mediaRecorder = new MediaRecorder(stream, {
  audioBitsPerSecond : 128000,
  videoBitsPerSecond : 100000,
  mimeType : 'video/webm;codecs=h264'
})
start.onclick = function () {
  mediaRecorder.start()
  console.log('开始采集')
}
stop.onclick = function () {
  mediaRecorder.stop()
  console.log('停止采集')
}
mediaRecorder.onstop = function (e) {
  // Join every recorded chunk into a single Blob for download
  var blob = new Blob(chunks, { 'type' : 'video/mp4' })
  // Reset so a subsequent recording starts clean
  chunks = []
  let a = document.createElement('a')
  a.href = URL.createObjectURL(blob)
  a.download = `test.mp4`
  a.click()
}
mediaRecorder.ondataavailable = function(e) {
  // BUG FIX: accumulate chunks rather than overwriting the previous one
  chunks.push(e.data)
}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<title>音视频混流</title>
</head>
<body>
<button id="button">开启摄像头</button>
<button id="start">开始采集</button>
<button id="stop">停止采集</button>
</body>
<script>
var button = document.getElementById('button')
var start = document.getElementById('start')
var stop = document.getElementById('stop')
// NOTE: removed `audioDom2 = document.querySelector('#audio2')` — there is
// no #audio2 element in this page, so it was always null and never used.
var audioCtx = new AudioContext();
var source = audioCtx.createBufferSource()
var track = audioCtx.createMediaStreamDestination();
// BUG FIX: `chunks` was never declared (implicit global) and was
// overwritten on every dataavailable event; declare it and accumulate.
var chunks = []
button.onclick = function () {
  // An AudioContext created without a user gesture starts suspended under
  // browser autoplay policy; resume it on this click so the BGM plays.
  audioCtx.resume()
  navigator.mediaDevices.getUserMedia({
    audio: true,
    video: true
  })
  .then(function(stream) {
    // Mix the BGM audio track into the captured camera/mic stream
    stream.addTrack(track.stream.getAudioTracks()[0])
    /* record this mixed stream */
    var mediaRecorder = new MediaRecorder(stream, {
      audioBitsPerSecond : 128000,
      videoBitsPerSecond : 100000,
      mimeType : 'video/webm;codecs=h264'
    })
    start.onclick = function () {
      mediaRecorder.start()
      console.log('开始采集')
    }
    stop.onclick = function () {
      mediaRecorder.stop()
      console.log('停止采集')
    }
    mediaRecorder.onstop = function (e) {
      // Join all recorded chunks into one Blob and trigger a download
      var blob = new Blob(chunks, { 'type' : 'video/mp4' })
      chunks = [] // reset so the next recording starts clean
      let a = document.createElement('a')
      a.href = URL.createObjectURL(blob)
      a.download = `test.mp4`
      a.click()
    }
    mediaRecorder.ondataavailable = function(e) {
      console.log(e)
      // BUG FIX: push each chunk instead of keeping only the last one
      chunks.push(e.data)
    }
  })
  .catch(function(err) {
    console.log(err)
    /* handle error */
  });
}
// Download the BGM file and decode it into the buffer source node
function getData() {
  var request = new XMLHttpRequest();
  request.open('GET', './music/test.mp3', true);
  request.responseType = 'arraybuffer';
  request.onload = function() {
    var audioData = request.response;
    audioCtx.decodeAudioData(audioData, function(buffer) {
      source.buffer = buffer;
      source.connect(track)
      source.loop = true;
      source.start(0)
    },
    function(e){
      // BUG FIX: the original callback built an error string and discarded
      // it; actually log the failure
      console.error('Error with decoding audio data', e)
    });
  }
  request.send();
}
getData()
</script>
</html>