我使用RPScreenRecorder.shared().startCapture进行屏幕录制,并使用AVAssetWriterInput编码为h264视频文件,但它为我提供了直接的.mp4,并且我希望在录制要流式传输的屏幕时逐帧播放h264视频文件。有什么办法可以访问来自RPScreenRecorder.shared().startCapture的样本缓冲区数据?这是代码。
在这里,我得到了整个mp4文件,但我只想要视频帧
import Foundation
import ReplayKit
import AVKit
/// Records the screen via ReplayKit and muxes the H.264-encoded video
/// samples into an .mp4 file with AVAssetWriter. Per-frame pixel data is
/// additionally exposed by unpacking each CMSampleBuffer's CVImageBuffer.
class ScreenRecorder
{
    /// Writes encoded samples into the .mp4 container.
    var assetWriter: AVAssetWriter!
    /// Video track input feeding `assetWriter`.
    var videoInput: AVAssetWriterInput!
    let viewOverlay = WindowUtil()
    let fileNameTxt = "Test"
    // Documents directory used for the side-channel per-frame text dump.
    let dir = try? FileManager.default.url(for: .documentDirectory,
                                           in: .userDomainMask, appropriateFor: nil, create: true)
    var sampleFileBuffer: String = ""

    // MARK: - Screen Recording

    /// Starts ReplayKit screen capture and writes video sample buffers to an
    /// .mp4 file, while also surfacing per-frame information.
    ///
    /// - Parameters:
    ///   - fileName: Base name of the output movie file (resolved via `ReplayFileUtil`).
    ///   - recordingHandler: Called with the capture error (`nil` on success),
    ///     mirroring the original per-callback reporting.
    func startRecording(withFileName fileName: String, recordingHandler: @escaping (Error?) -> Void)
    {
        guard #available(iOS 11.0, *) else {
            // RPScreenRecorder.startCapture requires iOS 11; nothing to do below it.
            return
        }

        let fileURL = URL(fileURLWithPath: ReplayFileUtil.filePath(fileName))
        do {
            assetWriter = try AVAssetWriter(outputURL: fileURL, fileType: .mp4)
        } catch {
            // BUG FIX: the original used `try!`, which crashes whenever writer
            // creation fails (e.g. the output file already exists). Report it.
            recordingHandler(error)
            return
        }

        let videoOutputSettings: [String: Any] = [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: UIScreen.main.bounds.size.width,
            AVVideoHeightKey: UIScreen.main.bounds.size.height
        ]
        videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoOutputSettings)
        // Real-time capture: the input must not throttle the capture callback.
        videoInput.expectsMediaDataInRealTime = true
        assetWriter.add(videoInput)

        // Destination for the per-frame text dump, if the directory was found.
        let fileURLTxt = dir?.appendingPathComponent(fileNameTxt).appendingPathExtension("txt")

        RPScreenRecorder.shared().startCapture(handler: { (sample, bufferType, error) in
            recordingHandler(error)
            // BUG FIX: the original kept writing even after a capture error.
            guard error == nil, CMSampleBufferDataIsReady(sample) else { return }

            if self.assetWriter.status == .unknown {
                // First ready sample: start the session at its timestamp so the
                // movie timeline begins at the first captured frame.
                self.assetWriter.startWriting()
                self.assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sample))
            }
            if self.assetWriter.status == .failed {
                print("Error occured, status = \(self.assetWriter.status.rawValue), \(self.assetWriter.error!.localizedDescription) \(String(describing: self.assetWriter.error))")
                return
            }

            if bufferType == .video, self.videoInput.isReadyForMoreMediaData {
                self.videoInput.append(sample)

                // BUG FIX: the original did `String(sample as! String)`, which
                // crashes at runtime — a CMSampleBuffer is not bridgeable to
                // String. To access raw frame data, extract the pixel buffer
                // instead; its plane base addresses are what you would stream.
                if let imageBuffer = CMSampleBufferGetImageBuffer(sample) {
                    CVPixelBufferLockBaseAddress(imageBuffer, .readOnly)
                    let width = CVPixelBufferGetWidth(imageBuffer)
                    let height = CVPixelBufferGetHeight(imageBuffer)
                    let pts = CMSampleBufferGetPresentationTimeStamp(sample)
                    // Human-readable per-frame summary for the text dump.
                    self.sampleFileBuffer = "frame \(width)x\(height) pts=\(CMTimeGetSeconds(pts))"
                    CVPixelBufferUnlockBaseAddress(imageBuffer, .readOnly)
                }

                // BUG FIX: avoid the original's force-unwrap of fileURLTxt!.
                if let fileURLTxt = fileURLTxt {
                    do {
                        try self.sampleFileBuffer.write(to: fileURLTxt, atomically: true, encoding: .utf8)
                    } catch {
                        print("Failed writing to URL: \(fileURLTxt), Error: " + error.localizedDescription)
                    }
                }
            }
            self.sampleFileBuffer = ""
        }) { (error) in
            recordingHandler(error)
        }
    }

    /// Stops capture and finalizes the movie file.
    ///
    /// - Parameter handler: Called with the stop-capture error (`nil` on success).
    func stopRecording(handler: @escaping (Error?) -> Void)
    {
        guard #available(iOS 11.0, *) else { return }
        RPScreenRecorder.shared().stopCapture { (error) in
            handler(error)
            // BUG FIX: AVFoundation requires marking inputs finished before
            // finishWriting; omitting it can stall or corrupt finalization.
            self.videoInput.markAsFinished()
            self.assetWriter.finishWriting {
                print(ReplayFileUtil.fetchAllReplays())
            }
        }
    }
}
最佳答案
在您的代码上,示例是CMSampleBuffer。
调用CMSampleBufferGetImageBuffer()并获取CVImageBuffer。
要锁定帧缓冲区,请调用CVPixelBufferLockBaseAddress(imageBuffer)。
就我而言,imageBuffer具有2个平面,Y和UV。
调用CVPixelBufferGetBaseAddressOfPlane(imageBuffer,0)并获取Y平面地址。
使用planeIndex = 1调用相同的API并获取UV平面地址。
获得平面的基地址后,您可以将其读取为uint8 *。
调用CVPixelBufferGetXXX API以获取宽度,高度,每行字节数。
不要忘记调用CVPixelBufferUnlockBaseAddress。