This article looks at whether multiple audio files can be mixed together, preferably with JavaScript. It should serve as a useful reference for anyone facing the same problem.

Problem Description

I want to combine audio clips, layered on top of each other so that they play synchronously and are saved in a new audio file. Any help would be much appreciated. I've done some digging online, but couldn't find a definitive answer as to whether or not many of the tools available as far as Javascript audio editing librarys go (Mix.js for example) are capable.

Recommended Answer

Yes, it is possible using OfflineAudioContext() or AudioContext.createChannelMerger() and creating a MediaStream. See Phonegap mixing audio files and Web Audio API.

You can use fetch() or XMLHttpRequest() to retrieve each audio resource as an ArrayBuffer, and AudioContext.decodeAudioData() to decode the response into an AudioBuffer for an AudioBufferSourceNode; OfflineAudioContext() to render the merged audio; AudioContext, AudioContext.createBufferSource(), AudioContext.createMediaStreamDestination(), and MediaRecorder() to record the stream; and Promise.all(), the Promise() constructor, and .then() to process the asynchronous requests to fetch() and AudioContext.decodeAudioData(), passing the resulting mixed-audio Blob at the stop event of MediaRecorder.
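
For reference, here is a condensed sketch of just the fetch-and-decode step described above, assuming a placeholder URL ("/path/to/audio.ogg") and the promise form of decodeAudioData():

// minimal sketch of the fetch-and-decode step; the URL is a placeholder
var ctx = new AudioContext();

fetch("/path/to/audio.ogg")
  .then(function(response) {
    // read the response body as an ArrayBuffer
    return response.arrayBuffer();
  })
  .then(function(buffer) {
    // decodeAudioData() resolves with a decoded AudioBuffer
    return ctx.decodeAudioData(buffer);
  })
  .then(function(audioBuffer) {
    // play the decoded buffer through an AudioBufferSourceNode
    var source = ctx.createBufferSource();
    source.buffer = audioBuffer;
    source.connect(ctx.destination);
    source.start();
  });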

Connect each AudioBufferSourceNode to OfflineAudioContext.destination and call .start() on each node; call OfflineAudioContext.startRendering(); create a new AudioBufferSourceNode on the AudioContext and assign the renderedBuffer to it; call .createMediaStreamDestination() on the AudioContext to create a MediaStream from the merged audio buffers; pass .stream to MediaRecorder(); at the stop event of MediaRecorder, create a Blob URL of the recorded audio mix with URL.createObjectURL(), which can be downloaded using an <a> element with the download attribute and href set to the Blob URL.

var sources = ["https://upload.wikimedia.org/wikipedia/commons/b/be/"
               + "Hidden_Tribe_-_Didgeridoo_1_Live.ogg"
               , "https://upload.wikimedia.org/wikipedia/commons/6/6e/"
               + "Micronesia_National_Anthem.ogg"];

var description = "HiddenTribeAnthem";
var context;
var recorder;
var div = document.querySelector("div");
var duration = 60000;
var chunks = [];
var audio = new AudioContext();
var mixedAudio = audio.createMediaStreamDestination();
var player = new Audio();
player.controls = "controls";

function get(src) {
  return fetch(src)
    .then(function(response) {
      return response.arrayBuffer()
    })
}

function stopMix(duration, ...media) {
  setTimeout(function(media) {
    media.forEach(function(node) {
      node.stop()
    })
  }, duration, media)
}

Promise.all(sources.map(get)).then(function(data) {
    // decode each ArrayBuffer into an AudioBuffer first so that the
    // OfflineAudioContext length can be set in sample frames
    return Promise.all(data.map(function(buffer) {
        return audio.decodeAudioData(buffer)
      }))
      .then(function(buffers) {
        // length of the longest decoded track, in sample frames
        var len = Math.max.apply(Math, buffers.map(function(buffer) {
          return buffer.length
        }));
        context = new OfflineAudioContext(2, len, 44100);
        buffers.forEach(function(bufferSource) {
          var source = context.createBufferSource();
          source.buffer = bufferSource;
          source.connect(context.destination);
          source.start()
        });
        return context.startRendering()
      })
      .then(function(renderedBuffer) {
        return new Promise(function(resolve) {
          var mix = audio.createBufferSource();
          mix.buffer = renderedBuffer;
          mix.connect(audio.destination);
          mix.connect(mixedAudio);
          recorder = new MediaRecorder(mixedAudio.stream);
          recorder.start(0);
          mix.start(0);
          div.innerHTML = "playing and recording tracks..";
          // stop playback and recorder in 60 seconds
          stopMix(duration, mix, recorder)

          recorder.ondataavailable = function(event) {
            chunks.push(event.data);
          };

          recorder.onstop = function(event) {
            var blob = new Blob(chunks,  {
              "type": "audio/ogg; codecs=opus"
            });
            console.log("recording complete");
            resolve(blob)
          };
        })
      })
      .then(function(blob) {
        console.log(blob);
        div.innerHTML = "mixed audio tracks ready for download..";
        var audioDownload = URL.createObjectURL(blob);
        var a = document.createElement("a");
        a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
        a.href = audioDownload;
        a.innerHTML = a.download;
        document.body.appendChild(a);
        a.insertAdjacentHTML("afterend", "<br>");
        player.src = audioDownload;
        document.body.appendChild(player);
      })
  })
  .catch(function(e) {
    console.log(e)
  });
<!DOCTYPE html>
<html>

<head>
</head>

<body>
  <div>loading audio tracks.. please wait</div>
</body>

</html>
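
Note that the second argument to OfflineAudioContext() is a length in sample frames, not bytes, so when the target duration is known in advance it can be computed from the sample rate; a minimal sketch (the 40-second duration is only an example):

// length is in sample frames, i.e. seconds * sampleRate
var seconds = 40;        // example duration, adjust as needed
var sampleRate = 44100;
var offline = new OfflineAudioContext(2, seconds * sampleRate, sampleRate);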

You can also use AudioContext.createChannelMerger() and AudioContext.createChannelSplitter():

var sources = ["/path/to/audoi1", "/path/to/audio2"];
var description = "mix";
var chunks = [];
var channels = [[0, 1], [1, 0]];
var audio = new AudioContext();
var player = new Audio();
var merger = audio.createChannelMerger(2);
var splitter = audio.createChannelSplitter(2);
var mixedAudio = audio.createMediaStreamDestination();
var duration = 60000;
var context;
var recorder;
var audioDownload;

player.controls = "controls";

function get(src) {
  return fetch(src)
    .then(function(response) {
      return response.arrayBuffer()
    })
}

function stopMix(duration, ...media) {
  setTimeout(function(media) {
    media.forEach(function(node) {
      node.stop()
    })
  }, duration, media)
}

Promise.all(sources.map(get)).then(function(data) {
    return Promise.all(data.map(function(buffer, index) {
        return audio.decodeAudioData(buffer)
          .then(function(bufferSource) {
            var channel = channels[index];
            var source = audio.createBufferSource();
            source.buffer = bufferSource;
            source.connect(splitter);
            splitter.connect(merger, channel[0], channel[1]);
            return source
          })
      }))
      .then(function(audionodes) {
        merger.connect(mixedAudio);
        merger.connect(audio.destination);
        recorder = new MediaRecorder(mixedAudio.stream);
        recorder.start(0);
        audionodes.forEach(function(node) {
          node.start(0)
        });

        stopMix(duration, ...audionodes, recorder);

        recorder.ondataavailable = function(event) {
          chunks.push(event.data);
        };

        recorder.onstop = function(event) {
          var blob = new Blob(chunks, {
            "type": "audio/ogg; codecs=opus"
          });
          audioDownload = URL.createObjectURL(blob);
          var a = document.createElement("a");
          a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
          a.href = audioDownload;
          a.innerHTML = a.download;
          player.src = audioDownload;
          document.body.appendChild(a);
          document.body.appendChild(player);
        };
      })
  })
  .catch(function(e) {
    console.log(e)
  });

That concludes this article on whether multiple audio files can be mixed together, preferably with JavaScript. We hope the recommended answer above is helpful.
