I'm trying to build a simple camera app in which the front camera can detect faces. This should be simple enough:
import UIKit
import AVFoundation

class CameraView: UIImageView, AVCaptureVideoDataOutputSampleBufferDelegate {

    // Capture pipeline state (declared as optionals, matching the optional chaining below)
    var camera: AVCaptureDevice?
    var session: AVCaptureSession?
    var input: AVCaptureDeviceInput?
    var output: AVCaptureVideoDataOutput?
    var outputQueue: DispatchQueue?
    var previewLayer: AVCaptureVideoPreviewLayer?

    override init(frame: CGRect) {
        super.init(frame: frame)
        handleCamera()
    }

    required init?(coder aDecoder: NSCoder) {
        super.init(coder: aDecoder)
        handleCamera()
    }
    func handleCamera() {
        camera = AVCaptureDevice.defaultDevice(withDeviceType: .builtInWideAngleCamera,
                                               mediaType: AVMediaTypeVideo, position: .front)
        session = AVCaptureSession()

        // Set the recovered camera as an input device for the capture session
        do {
            input = try AVCaptureDeviceInput(device: camera)
        } catch {
            print("ERROR: Front camera can't be used as input")
            input = nil
        }

        // Add the input from the camera to the capture session
        if session?.canAddInput(input) == true {
            session?.addInput(input)
        }

        output = AVCaptureVideoDataOutput()
        output?.alwaysDiscardsLateVideoFrames = true
        outputQueue = DispatchQueue(label: "outputQueue")
        output?.setSampleBufferDelegate(self, queue: outputQueue)

        // Add the front camera output to the session for use and modification
        if session?.canAddOutput(output) == true {
            session?.addOutput(output)
        } else {
            // Front camera can't be used as output; handle the error
            print("ERROR: Output not viable")
        }

        // Set up the camera preview with the session input
        previewLayer = AVCaptureVideoPreviewLayer(session: session)
        previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
        previewLayer?.connection.videoOrientation = AVCaptureVideoOrientation.portrait
        previewLayer?.frame = self.bounds
        self.layer.addSublayer(previewLayer!)

        // Start the session and run the feed onto the preview
        session?.startRunning()
    }
    func captureOutput(_ captureOutput: AVCaptureOutput!, didDrop sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        let cameraImage = CIImage(cvPixelBuffer: pixelBuffer!)

        let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
        let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
        let faces = faceDetector?.features(in: cameraImage)

        for face in faces as! [CIFaceFeature] {
            print("Found bounds are \(face.bounds)")

            let faceBox = UIView(frame: face.bounds)
            faceBox.layer.borderWidth = 3
            faceBox.layer.borderColor = UIColor.red.cgColor
            faceBox.backgroundColor = UIColor.clear
            self.addSubview(faceBox)

            if face.hasLeftEyePosition {
                print("Left eye bounds are \(face.leftEyePosition)")
            }

            if face.hasRightEyePosition {
                print("Right eye bounds are \(face.rightEyePosition)")
            }
        }
    }
}
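
For what it's worth, I create the view roughly like this (a minimal sketch; the hosting ViewController and the full-screen frame are assumptions for illustration, not part of my real code):

import UIKit

class ViewController: UIViewController {
    override func viewDidLoad() {
        super.viewDidLoad()
        // The preview layer is sized to the view's bounds inside handleCamera()
        let cameraView = CameraView(frame: view.bounds)
        view.addSubview(cameraView)
    }
}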
My problem is that I can get the camera running, but no matter which of the many code snippets from the internet I try, I have never managed to detect a face through captureOutput. Either the app never enters the function, or it crashes on a broken variable, most often because sampleBuffer is nil.
What am I doing wrong?
Best Answer
You need to change your captureOutput function parameters to the following:

func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!)

Your captureOutput function uses didDrop, so it is called when a buffer is dropped, not when a buffer arrives from the camera.
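
Put together, the corrected delegate method would look something like this (a sketch that reuses the detection code from the question; only the method signature changes):

// Called for every frame the camera delivers (Swift 3 signature)
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
    let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
    let cameraImage = CIImage(cvPixelBuffer: pixelBuffer!)

    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)

    for face in faceDetector?.features(in: cameraImage) as! [CIFaceFeature] {
        print("Found bounds are \(face.bounds)")
    }
}

Note that on Swift 4 and later SDKs the same callback was renamed to captureOutput(_:didOutput:from:) with non-optional parameters, so the exact signature depends on your toolchain.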
A similar question about ios - iOS camera face tracking (Swift 3, Xcode 8) can be found on Stack Overflow: https://stackoverflow.com/questions/43389445/