I've been searching and experimenting for days, but I can't get this working and can't find any solution on the internet.
I'm trying to capture audio with the microphone and then play it back through the speaker.
Here is my code:
import UIKit
import AVFoundation

class ViewController: UIViewController, AVAudioRecorderDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {

    var recordingSession: AVAudioSession!
    var audioRecorder: AVAudioRecorder!
    var captureSession: AVCaptureSession!
    var microphone: AVCaptureDevice!
    var inputDevice: AVCaptureDeviceInput!
    var outputDevice: AVCaptureAudioDataOutput!

    override func viewDidLoad() {
        super.viewDidLoad()

        recordingSession = AVAudioSession.sharedInstance()
        do {
            // Configure the shared session for simultaneous recording and playback.
            try recordingSession.setCategory(AVAudioSessionCategoryPlayAndRecord)
            try recordingSession.setMode(AVAudioSessionModeVoiceChat)
            try recordingSession.setPreferredSampleRate(44000.00)
            try recordingSession.setPreferredIOBufferDuration(0.2)
            try recordingSession.setActive(true)

            recordingSession.requestRecordPermission() { [unowned self] (allowed: Bool) -> Void in
                DispatchQueue.main.async {
                    if allowed {
                        do {
                            // Route the default microphone into a capture session and
                            // deliver its sample buffers to this view controller.
                            self.microphone = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeAudio)
                            self.inputDevice = try AVCaptureDeviceInput(device: self.microphone)
                            self.outputDevice = AVCaptureAudioDataOutput()
                            self.outputDevice.setSampleBufferDelegate(self, queue: DispatchQueue.main)
                            self.captureSession = AVCaptureSession()
                            self.captureSession.addInput(self.inputDevice)
                            self.captureSession.addOutput(self.outputDevice)
                            self.captureSession.startRunning()
                        } catch let error {
                            print(error.localizedDescription)
                        }
                    }
                }
            }
        } catch let error {
            print(error.localizedDescription)
        }
    }
And the callback function:
    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        // Extract the sample buffer's audio into an AudioBufferList.
        var audioBufferList = AudioBufferList(
            mNumberBuffers: 1,
            mBuffers: AudioBuffer(mNumberChannels: 0,
                                  mDataByteSize: 0,
                                  mData: nil)
        )
        var blockBuffer: CMBlockBuffer?

        let osStatus = CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
            sampleBuffer,
            nil,
            &audioBufferList,
            MemoryLayout<AudioBufferList>.size,
            nil,
            nil,
            UInt32(kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment),
            &blockBuffer
        ) // note: this status is never checked

        do {
            // Gather the raw LPCM bytes out of every buffer in the list.
            let data = NSMutableData()
            for _ in 0..<audioBufferList.mNumberBuffers {
                let audioBuffer = AudioBuffer(
                    mNumberChannels: audioBufferList.mBuffers.mNumberChannels,
                    mDataByteSize: audioBufferList.mBuffers.mDataByteSize,
                    mData: audioBufferList.mBuffers.mData
                )
                _ = audioBuffer.mData?.load(as: Float32.self) // debugging peek at the first sample
                data.append(audioBuffer.mData!, length: Int(audioBuffer.mDataByteSize))
            }

            let dataFromNsData = Data(referencing: data)
            // This is the line that throws: AVAudioPlayer expects a complete
            // audio file (CAF, WAV, MP3, ...), not headerless LPCM bytes.
            let avAudioPlayer = try AVAudioPlayer(data: dataFromNsData)
            avAudioPlayer.prepareToPlay()
            avAudioPlayer.play()
        } catch let error {
            print(error.localizedDescription)
            // prints: The operation couldn't be completed. (OSStatus error 1954115647.)
        }
    }
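For reference, that OSStatus becomes readable if you treat it as a four-character code. A minimal sketch (fourCC is a made-up debugging helper, not a framework API):

import AudioToolbox

// Made-up helper: render an OSStatus as the four-character code
// that CoreAudio packs into it.
func fourCC(_ status: OSStatus) -> String {
    let n = UInt32(bitPattern: status)
    let bytes = [UInt8((n >> 24) & 0xFF), UInt8((n >> 16) & 0xFF),
                 UInt8((n >> 8) & 0xFF), UInt8(n & 0xFF)]
    return String(bytes: bytes, encoding: .ascii) ?? "\(status)"
}

print(fourCC(1954115647)) // "typ?"

1954115647 decodes to 'typ?', i.e. kAudioFileUnsupportedFileTypeError: the player does not recognize the bytes as any audio file type.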
Any help with this would be amazing, and it would probably help a lot of other people too, since so many of the Swift examples out there are incomplete.
Thanks.
Best Answer
You were so close! You're capturing audio in the didOutputSampleBuffer callback, but that's a high-frequency callback, so you're creating a lot of AVAudioPlayers and passing them raw LPCM data, while they only know how to parse CoreAudio file types, and then they go out of scope anyway.
You could very easily play the buffers you're capturing with AVCaptureSession using AVAudioEngine's AVAudioPlayerNode (a conversion sketch follows the code below), but at that point you may as well use AVAudioEngine to record from the microphone too:
import UIKit
import AVFoundation

class ViewController: UIViewController {

    var engine = AVAudioEngine()

    override func viewDidLoad() {
        super.viewDidLoad()

        let input = engine.inputNode!
        let player = AVAudioPlayerNode()
        engine.attach(player)

        let bus = 0
        let inputFormat = input.inputFormat(forBus: bus)
        engine.connect(player, to: engine.mainMixerNode, format: inputFormat)

        // Tap the microphone and schedule each captured buffer straight
        // onto the player node, which feeds the main mixer / speaker.
        input.installTap(onBus: bus, bufferSize: 512, format: inputFormat) { (buffer, time) -> Void in
            player.scheduleBuffer(buffer)
        }

        try! engine.start()
        player.play()
    }
}
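For completeness: if you wanted to keep the question's AVCaptureSession pipeline and only borrow AVAudioPlayerNode for playback, each CMSampleBuffer would first need converting into an AVAudioPCMBuffer. A rough, untested sketch against the same Swift 3-era SDK as the code above (pcmBuffer(from:) is an invented name; some of these initializers became failable in later SDKs):

import AVFoundation
import CoreMedia

// Hypothetical helper: copy a CMSampleBuffer's LPCM samples into an
// AVAudioPCMBuffer that AVAudioPlayerNode.scheduleBuffer(_:) accepts.
func pcmBuffer(from sampleBuffer: CMSampleBuffer) -> AVAudioPCMBuffer? {
    guard let description = CMSampleBufferGetFormatDescription(sampleBuffer),
          var asbd = CMAudioFormatDescriptionGetStreamBasicDescription(description)?.pointee
    else { return nil }

    let frames = AVAudioFrameCount(CMSampleBufferGetNumSamples(sampleBuffer))
    let format = AVAudioFormat(streamDescription: &asbd)
    let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: frames)
    buffer.frameLength = frames

    // Copy the PCM bytes straight into the buffer's AudioBufferList.
    let status = CMSampleBufferCopyPCMDataIntoAudioBufferList(
        sampleBuffer, 0, Int32(frames), buffer.mutableAudioBufferList)
    return status == noErr ? buffer : nil
}

Each converted buffer could then go to player.scheduleBuffer(_:). Note also that the 512-frame tap above keeps capture-to-playback latency low, although the system is free to choose a different actual buffer size.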