I have a macOS Swift application that processes audio data recorded from the microphone. The microphone is capable of stereo, but I can only record mono data.
In the code below, if I set let alwaysMono = true, setup() reports that the active format is stereo but overrides it to mono, and everything works with the mono input stream.
If I set let alwaysMono = false, setup() sets nChannels to 2, but captureOutput never receives any data: the AudioBuffers obtained through the UnsafeMutableAudioBufferListPointer always have a nil mData. If I don't check for nil mData, the program crashes.
How do I get full stereo input?
Edit: In captureOutput, CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer returns error code -12737, which corresponds to kCMSampleBufferError_ArrayTooSmall. I have inspected the sampleBuffer argument passed to captureOutput but can't see anything obviously wrong with it. Then again, I don't know what to look for.
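For reference, kCMSampleBufferError_ArrayTooSmall means the AudioBufferList handed to the call has fewer mBuffers entries than the sample buffer needs; stereo data is often delivered non-interleaved, i.e. one buffer per channel, so a single-entry list on the stack is too small. A minimal sketch of the two-pass size-query pattern that CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer supports, assuming it runs inside captureOutput with the same sampleBuffer argument as below (illustrative, not code from the app):

// First pass: ask only for the required AudioBufferList size.
// The call may report ArrayTooSmall here, but listSize is still filled in.
var listSize = 0
_ = CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
    sampleBuffer,
    bufferListSizeNeededOut: &listSize,
    bufferListOut: nil,
    bufferListSize: 0,
    blockBufferAllocator: nil,
    blockBufferMemoryAllocator: nil,
    flags: 0,
    blockBufferOut: nil)
guard listSize > 0 else { return }
// Second pass: allocate that much and fetch the buffers for real.
let rawList = UnsafeMutableRawPointer.allocate(
    byteCount: listSize, alignment: MemoryLayout<AudioBufferList>.alignment)
defer { rawList.deallocate() }
let list = rawList.bindMemory(to: AudioBufferList.self, capacity: 1)
var blockBuffer: CMBlockBuffer?
let status = CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
    sampleBuffer,
    bufferListSizeNeededOut: nil,
    bufferListOut: list,
    bufferListSize: listSize,
    blockBufferAllocator: nil,
    blockBufferMemoryAllocator: nil,
    flags: UInt32(kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment),
    blockBufferOut: &blockBuffer)
if status == noErr {
    for buff in UnsafeMutableAudioBufferListPointer(list) {
        // Stereo often arrives as two buffers, one per channel.
        print("buffer: \(buff.mNumberChannels) channel(s), \(buff.mDataByteSize) bytes")
    }
}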
Another edit: I tested the code with the built-in mono microphone and, to my surprise, it is also reported as stereo, which suggests there is something obviously wrong with the way I fetch and use AVCaptureDevice.activeFormat. I don't know where to go from here.
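One thing worth comparing (a sketch, not code from the app): reading the channel count from the active format's AudioStreamBasicDescription instead of comparing the first AudioFormatListItem's mChannelLayoutTag, which is what getActiveFormat() below does:

// Sketch: derive the channel count from the ASBD rather than the layout tag.
if #available(OSX 10.15, *),
   let device = AVCaptureDevice.default(for: .audio),
   let asbd = device.activeFormat.formatDescription.audioStreamBasicDescription {
    print("activeFormat reports \(asbd.mChannelsPerFrame) channel(s)")
}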

import AVFoundation

class Recorder: NSObject, AVCaptureAudioDataOutputSampleBufferDelegate {
    let alwaysMono = false
    var nChannels:UInt32 = 1
    let session : AVCaptureSession!
    static let realTimeQueue = DispatchQueue(label: "com.myapp.realtime",
                                             qos: DispatchQoS( qosClass:DispatchQoS.QoSClass.userInitiated, relativePriority: 0 ))
    override init() {
        session = AVCaptureSession()
        super.init()
    }
    static var recorder:Recorder?
    static func record() ->Bool {
        if recorder == nil {
            recorder = Recorder()
            if !recorder!.setup(callback:record) {
                recorder = nil
                return false
            }
        }
        realTimeQueue.async {
            if !recorder!.session.isRunning {
                recorder!.session.startRunning()
            }
        }
        return true
    }
    static func pause() {
        recorder!.session.stopRunning()
    }
    func setup( callback:@escaping (()->Bool)) -> Bool {
        let device = AVCaptureDevice.default( for: AVMediaType.audio )
        if device == nil { return false }
        if let format = getActiveFormat() {
            nChannels = format.mChannelLayoutTag == kAudioChannelLayoutTag_Stereo ? 2 : 1
            print("active format is \((nChannels==2) ? "Stereo" : "Mono")")
            if alwaysMono {
                print( "Overriding to mono" )
                nChannels = 1
            }
        }
        if #available(OSX 10.14, *) {
            let status = AVCaptureDevice.authorizationStatus( for:AVMediaType.audio )
            if status == .notDetermined {
                AVCaptureDevice.requestAccess(for: AVMediaType.audio ){ granted in
                    _ = callback()
                }
                return false
            } else if status != .authorized {
                return false
            }
        }
        var input : AVCaptureDeviceInput
        do {
            try device!.lockForConfiguration()
            try input = AVCaptureDeviceInput( device: device! )
            device!.unlockForConfiguration()
        } catch {
            device!.unlockForConfiguration()
            return false
        }
        let output = AVCaptureAudioDataOutput()
        output.setSampleBufferDelegate(self, queue: Recorder.realTimeQueue)
        let settings = [
            AVFormatIDKey: kAudioFormatLinearPCM,
            AVNumberOfChannelsKey : nChannels,
            AVSampleRateKey : 44100,
            AVLinearPCMBitDepthKey : 16,
            AVLinearPCMIsFloatKey : false
            ] as [String : Any]
        output.audioSettings = settings
        session.beginConfiguration()
        if !session.canAddInput( input ) {
            return false
        }
        session.addInput( input )
        if !session.canAddOutput( output ) {
            return false
        }
        session.addOutput( output )
        session.commitConfiguration()
        return true
    }
    func getActiveFormat() -> AudioFormatListItem? {
        if #available(OSX 10.15, *) {
            let device = AVCaptureDevice.default( for: AVMediaType.audio )
            if device == nil { return nil }
            let list = device!.activeFormat.formatDescription.audioFormatList
            if list.count < 1 { return nil }
            return list[0]
        }
        return nil
    }
    func captureOutput(_ captureOutput: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection){
        var buffer: CMBlockBuffer? = nil
        var audioBufferList = AudioBufferList(
            mNumberBuffers: 1,
            mBuffers: AudioBuffer(mNumberChannels: nChannels, mDataByteSize: 0, mData: nil)
        )
        CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
            sampleBuffer,
            bufferListSizeNeededOut: nil,
            bufferListOut: &audioBufferList,
            bufferListSize: MemoryLayout<AudioBufferList>.size,
            blockBufferAllocator: nil,
            blockBufferMemoryAllocator: nil,
            flags: UInt32(kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment),
            blockBufferOut: &buffer
        )
        let abl = UnsafeMutableAudioBufferListPointer(&audioBufferList)
        for buff in abl {
            if let mData = buff.mData {
                let count = Int(buff.mDataByteSize) / MemoryLayout<Int16>.size
                let samples = mData.assumingMemoryBound(to: Int16.self)
                process(samples: samples, count: count)
            } else {
                print("No data!")
            }
        }
    }
    func process( samples: UnsafeMutablePointer<Int16>, count: Int ) {
        let firstValue = samples[0]
        print( "\(count) values received, first is \(firstValue)" )
    }
}

Best answer

This is a question where a lot could be going wrong. First, it depends on which microphone you are using. Is it the one built into the Mac? If so, it is usually mono. Still, you can build yourself a workaround to test whether stereo capture works at all, without relying on the alwaysMono flag: record twice, taking the first channel on one pass and the second channel on the other, with a separate "doubled-up" code path for each pass, roughly as sketched below.
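A sketch of that two-pass idea, under the assumption that the macOS-only AVCaptureAudioChannel.volume setter is available; "output" is the AVCaptureAudioDataOutput from the question's setup():

// Sketch: silence all but one channel per recording pass (macOS only).
if let connection = output.connection(with: .audio) {
    for (index, channel) in connection.audioChannels.enumerated() {
        channel.volume = (index == 0) ? 1.0 : 0.0   // pass 1: first channel only
    }
}
// For the second pass, invert the condition to keep only the second channel.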
The second thing I would try is explicitly typing the stereo channel count in the setup function, so the output is guaranteed to ask for its two channels, and always comparing alwaysMono with an explicit == true or == false. Also see whether you can print some channel information at several points while the code runs, so you know exactly where things go wrong; a sketch of both follows.
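A sketch of those two suggestions; the settings dictionary mirrors the one in setup(), and where exactly to place the logging is an assumption:

// Sketch: request stereo explicitly instead of deriving it from activeFormat.
let settings = [
    AVFormatIDKey: kAudioFormatLinearPCM,
    AVNumberOfChannelsKey: 2,           // explicit stereo, no alwaysMono involved
    AVSampleRateKey: 44100,
    AVLinearPCMBitDepthKey: 16,
    AVLinearPCMIsFloatKey: false
] as [String: Any]
output.audioSettings = settings

// ...and, inside captureOutput, log the channel count the delegate actually sees.
if let desc = CMSampleBufferGetFormatDescription(sampleBuffer),
   let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(desc)?.pointee {
    print("delegate sees \(asbd.mChannelsPerFrame) channel(s)")
}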

Regarding swift - capturing stereo audio data, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/64126896/
