Real-time face detection not working

This article walks through how to handle real-time face detection not working. It should be a useful reference for anyone facing the same problem; follow along below.

Problem description

This code does not detect any face from the camera, even though there is no error. I want the face to be detected in real time and surrounded by a red square, but I think I have not placed the code properly. Should something go in viewDidLoad or somewhere else?

import UIKit
import CoreImage

class ViewController: UIViewController ,UIAlertViewDelegate, UIImagePickerControllerDelegate, UINavigationControllerDelegate  {

@IBOutlet var imageView: UIImageView!
@IBAction func Moodify(_ sender: UIButton) {


    func detect() {

        guard let personciImage = CIImage(image: imageView.image!) else {
            return
        }

        let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
        let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
        let faces = faceDetector?.features(in: personciImage)


        // For converting the Core Image Coordinates to UIView Coordinates
        let ciImageSize = personciImage.extent.size
        var transform = CGAffineTransform(scaleX: 1, y: -1)
        transform = transform.translatedBy(x: 0, y: -ciImageSize.height)

        for face in faces as! [CIFaceFeature] {

            print("Found bounds are \(face.bounds)")

            // Apply the transform to convert the coordinates
            var faceViewBounds = face.bounds.applying(transform)

            // Calculate the actual position and size of the rectangle in the image view
            let viewSize = imageView.bounds.size
            let scale = min(viewSize.width / ciImageSize.width,
                            viewSize.height / ciImageSize.height)
            let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
            let offsetY = (viewSize.height - ciImageSize.height * scale) / 2

            faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
            faceViewBounds.origin.x += offsetX
            faceViewBounds.origin.y += offsetY

            let faceBox = UIView(frame: faceViewBounds)
            //let faceBox = UIView(frame: face.bounds)
            faceBox.layer.borderWidth = 3
            faceBox.layer.borderColor = UIColor.red.cgColor
            faceBox.backgroundColor = UIColor.clear
            imageView.addSubview(faceBox)

            if face.hasLeftEyePosition {
                print("Left eye bounds are \(face.leftEyePosition)")
            }

            if face.hasRightEyePosition {
                print("Right eye bounds are \(face.rightEyePosition)")
            }
        }
    }

    let picker = UIImagePickerController()
    picker.delegate = self
    picker.allowsEditing = true
    picker.sourceType = .camera
    picker.cameraDevice = .front
    self.present(picker, animated: true, completion: { _ in })

    func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [AnyHashable: Any]) {
        let chosenImage = info[UIImagePickerControllerEditedImage]
        self.imageView!.image = chosenImage as? UIImage
        picker.dismiss(animated: true, completion: { _ in })
    }

     // picker.dismiss(animated: true, completion: { _ in })
    func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
        picker.dismiss(animated: true, completion: { _ in })
    }
}

override func viewDidLoad() {

    let alert = UIAlertController(title: "Ooops!!!", message: "Camera is not connected", preferredStyle: UIAlertControllerStyle.alert)
    alert.addAction(UIAlertAction(title: "Connect", style: UIAlertActionStyle.default, handler: nil))
    self.present(alert, animated: true, completion: nil)

    super.viewDidLoad()
    // Do any additional setup after loading the view, typically from a nib.

}

override func didReceiveMemoryWarning() {
    super.didReceiveMemoryWarning()
    // Dispose of any resources that can be recreated.
}
}


Recommended answer

After looking through your code, I see that you never even call detect() after you take a snapshot. I tried to fix that as described below; however, detect() will still report zero faces found, for the reasons I describe in Face Detection with Camera.

lazy var picker: UIImagePickerController = {
    let picker = UIImagePickerController()
    picker.delegate = self
    picker.allowsEditing = true
    picker.sourceType = .camera
    picker.cameraDevice = .front
    return picker
}()

@IBOutlet var imageView: UIImageView!
override func viewDidLoad() {
    super.viewDidLoad()
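    // scaleAspectFit letterboxes the photo inside the image view; the
    // scale/offset math in detect() below compensates for exactly that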
    imageView.contentMode = .scaleAspectFit
}

@IBAction func TakePhoto(_ sender: Any) {
    self.present(picker, animated: true, completion: nil)
}

// MARK: - UIImagePickerControllerDelegate
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
    if let chosenImage = info[UIImagePickerControllerOriginalImage] as? UIImage {
        self.imageView.image = chosenImage
        // Got the image from the camera; imageView.image is no longer nil, so run facial detection
        detect()
    }
    // Dismiss the picker whether or not an image could be extracted
    picker.dismiss(animated: true, completion: nil)
}


func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
    picker.dismiss(animated: true, completion: nil)
}

// MARK: - Face Detection

func detect() {

    guard let image = imageView.image,
          let personciImage = CIImage(image: image) else {
        return
    }

    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
    let faces = faceDetector?.features(in: personciImage)
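    // If zero faces come back for a camera photo, image orientation is the usual
    // culprit: CIDetector assumes an "up" image, while camera photos carry EXIF
    // orientation. A possible fix (an assumption, not part of the original answer)
    // is to pass the orientation along, e.g. 6 for a portrait back-camera shot:
    // let faces = faceDetector?.features(in: personciImage,
    //                                    options: [CIDetectorImageOrientation: 6])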


    // For converting the Core Image Coordinates to UIView Coordinates
    let ciImageSize = personciImage.extent.size
    var transform = CGAffineTransform(scaleX: 1, y: -1)
    transform = transform.translatedBy(x: 0, y: -ciImageSize.height)
    print("faces.count = \(faces?.count)")

    for face in faces as! [CIFaceFeature] {

        print("Found bounds are \(face.bounds)")

        // Apply the transform to convert the coordinates
        var faceViewBounds = face.bounds.applying(transform)

        // Calculate the actual position and size of the rectangle in the image view
        let viewSize = imageView.bounds.size
        let scale = min(viewSize.width / ciImageSize.width,
                        viewSize.height / ciImageSize.height)
        let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
        let offsetY = (viewSize.height - ciImageSize.height * scale) / 2

        faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
        faceViewBounds.origin.x += offsetX
        faceViewBounds.origin.y += offsetY

        let faceBox = UIView(frame: faceViewBounds)
        //let faceBox = UIView(frame: face.bounds)
        faceBox.layer.borderWidth = 3
        faceBox.layer.borderColor = UIColor.red.cgColor
        faceBox.backgroundColor = UIColor.clear
        imageView.addSubview(faceBox)

        if face.hasLeftEyePosition {
            print("Left eye bounds are \(face.leftEyePosition)")
        }

        if face.hasRightEyePosition {
            print("Right eye bounds are \(face.rightEyePosition)")
        }
    }
}
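
For detection that is genuinely real-time, with a box tracking the face in the live preview rather than on a snapshot, UIImagePickerController is the wrong tool, since it only hands back still images. A common approach is to run an AVCaptureSession and feed its video frames to the same CIDetector. The following is only a minimal sketch of that idea in Swift 3 style; the class name LiveFaceViewController and the rest of the structure are illustrative assumptions, not part of the original answer.

import UIKit
import AVFoundation
import CoreImage

// Minimal sketch: per-frame face detection on the live camera feed.
// Assumes camera permission is granted (NSCameraUsageDescription in Info.plist);
// drawing the red boxes over the preview layer is left to the reader.
class LiveFaceViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    let session = AVCaptureSession()
    // Low accuracy keeps the per-frame cost acceptable for video
    let detector = CIDetector(ofType: CIDetectorTypeFace, context: nil,
                              options: [CIDetectorAccuracy: CIDetectorAccuracyLow])

    override func viewDidLoad() {
        super.viewDidLoad()

        guard let camera = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo),
              let input = try? AVCaptureDeviceInput(device: camera) else {
            return
        }
        session.addInput(input)

        // Frames are delivered to captureOutput(_:didOutputSampleBuffer:from:) below
        let output = AVCaptureVideoDataOutput()
        output.setSampleBufferDelegate(self, queue: DispatchQueue(label: "faceDetection"))
        session.addOutput(output)

        // Show the live camera feed behind any face boxes you draw
        if let preview = AVCaptureVideoPreviewLayer(session: session) {
            preview.frame = view.bounds
            view.layer.addSublayer(preview)
        }

        session.startRunning()
    }

    // Called once per captured video frame, on the queue set above
    func captureOutput(_ captureOutput: AVCaptureOutput!,
                       didOutputSampleBuffer sampleBuffer: CMSampleBuffer!,
                       from connection: AVCaptureConnection!) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let ciImage = CIImage(cvPixelBuffer: pixelBuffer)

        // Orientation 6 assumes a portrait device with the back camera; adjust as needed
        let faces = detector?.features(in: ciImage,
                                       options: [CIDetectorImageOrientation: 6])
        for face in faces as? [CIFaceFeature] ?? [] {
            print("Face at \(face.bounds)")
        }
    }
}

The coordinate conversion for drawing the boxes is the same bottom-left-origin transform used in detect() above, applied against the preview layer's bounds instead of the image view's.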

That concludes this article on real-time face detection not working. We hope the recommended answer is helpful, and thank you for your continued support!
