我目前正在从事一个涉及将OpenCVSharp集成到Unity中的项目,以允许在游戏环境中跟踪眼睛。我设法将OpenCVSharp集成到Unity编辑器中,并且目前在游戏中可以进行眼睛检测(而不是跟踪)。它可以在网络摄像头图像中找到您的眼睛,然后在纹理中显示其当前检测到的位置,我将其显示在场景中。

但是,这会导致fps大幅下降,这主要是因为每一帧都将网络摄像头纹理转换为IPLimage,以便OpenCV可以处理它。在完成所有的眼睛检测之后,它必须将其转换回2D纹理以在场景中显示。因此可以理解,对于CPU来说,它太多了。 (据我所知,它仅在CPU上使用1个内核)。

有没有一种方法可以在不将纹理转换为IPLimage的情况下进行所有眼睛检测?或其他任何方法来修复fps下降。我尝试过的一些事情包括:

  • 限制其更新的帧。但这只是导致它
    才能平稳运行,然后在它必须
    更新。
  • 查看线程,但据我所知Unity不允许这样做。
    据我所知,它仅在CPU上使用1个内核,这似乎有点愚蠢。如果有办法可以解决这个问题?
  • 在相机上尝试了不同的分辨率,但是游戏可以实际平稳运行的分辨率太小,以至于无法实际检测到眼睛,更不用说跟踪了。

  • 我已经在下面包含了代码,如果您希望在代码编辑器中查看它,则这里是C# File的链接。任何建议或帮助将不胜感激!

    作为参考,我使用了here (eye detection using opencvsharp)中的代码。
    using UnityEngine;
    using System.Collections;
    using System;
    using System.IO;
    using OpenCvSharp;
    //using System.Xml;
    
    //using OpenCvSharp.Extensions;
    //using System.Windows.Media;
    //using System.Windows.Media.Imaging;
    
    
    
    public class CaptureScript : MonoBehaviour
    {
        public GameObject planeObj;
        public WebCamTexture webcamTexture;     //Texture retrieved from the webcam
        public Texture2D texImage;              //Texture to apply to plane
        public string deviceName;
    
        private int devId = 1;
        private int imWidth = 640;              //camera width
        private int imHeight = 360;             //camera height
        private string errorMsg = "No errors found!";
        static IplImage matrix;                 //Ipl image of the converted webcam texture
    
        CvColor[] colors = new CvColor[]
        {
            new CvColor(0,0,255),
            new CvColor(0,128,255),
            new CvColor(0,255,255),
            new CvColor(0,255,0),
            new CvColor(255,128,0),
            new CvColor(255,255,0),
            new CvColor(255,0,0),
            new CvColor(255,0,255),
        };
    
        const double Scale = 1.25;
        const double ScaleFactor = 2.5;
        const int MinNeighbors = 2;
    
    
    // Use this for initialization
        void Start ()
        {
                //Webcam initialisation
                WebCamDevice[] devices = WebCamTexture.devices;
                Debug.Log ("num:" + devices.Length);
    
                for (int i=0; i<devices.Length; i++) {
                        print (devices [i].name);
                        if (devices [i].name.CompareTo (deviceName) == 1) {
                                devId = i;
                        }
                }
    
                if (devId >= 0) {
                        planeObj = GameObject.Find ("Plane");
                        texImage = new Texture2D (imWidth, imHeight, TextureFormat.RGB24, false);
                        webcamTexture = new WebCamTexture (devices [devId].name, imWidth, imHeight, 30);
                        webcamTexture.Play ();
    
                        matrix = new IplImage (imWidth, imHeight, BitDepth.U8, 3);
                }
    
    
        }
    
        void Update ()
        {
            if (devId >= 0)
            {
                    //Convert webcam texture to iplimage
                    Texture2DtoIplImage();
    
                /*DO IMAGE MANIPULATION HERE*/
    
                //do eye detection on iplimage
                EyeDetection();
    
    
                /*END IMAGE MANIPULATION*/
    
                if (webcamTexture.didUpdateThisFrame)
                {
                    //convert iplimage to texture
                    IplImageToTexture2D();
                }
    
            }
            else
            {
                Debug.Log ("Can't find camera!");
            }
    
        }
    
        void EyeDetection()
        {
    
            using(IplImage smallImg = new IplImage(new CvSize(Cv.Round (imWidth/Scale), Cv.Round(imHeight/Scale)),BitDepth.U8, 1))
            {
                using(IplImage gray = new IplImage(matrix.Size, BitDepth.U8, 1))
                {
                    Cv.CvtColor (matrix, gray, ColorConversion.BgrToGray);
                    Cv.Resize(gray, smallImg, Interpolation.Linear);
                    Cv.EqualizeHist(smallImg, smallImg);
                }
    
    
                using(CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile (@"C:\Users\User\Documents\opencv\sources\data\haarcascades\haarcascade_eye.xml"))
                using(CvMemStorage storage = new CvMemStorage())
                {
                    storage.Clear ();
                    CvSeq<CvAvgComp> eyes = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
                    for(int i = 0; i < eyes.Total; i++)
                    {
                        CvRect r = eyes[i].Value.Rect;
                        CvPoint center = new CvPoint{ X = Cv.Round ((r.X + r.Width * 0.5) * Scale), Y = Cv.Round((r.Y + r.Height * 0.5) * Scale) };
                        int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
                        matrix.Circle (center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                    }
                }
    
            }
        }
    
        void OnGUI ()
        {
                GUI.Label (new Rect (200, 200, 100, 90), errorMsg);
        }
    
        void IplImageToTexture2D ()
        {
                int jBackwards = imHeight;
    
                for (int i = 0; i < imHeight; i++) {
                        for (int j = 0; j < imWidth; j++) {
                                float b = (float)matrix [i, j].Val0;
                                float g = (float)matrix [i, j].Val1;
                                float r = (float)matrix [i, j].Val2;
                                Color color = new Color (r / 255.0f, g / 255.0f, b / 255.0f);
    
    
                                jBackwards = imHeight - i - 1; // notice it is jBackward and i
                                texImage.SetPixel (j, jBackwards, color);
                        }
                }
                texImage.Apply ();
                planeObj.renderer.material.mainTexture = texImage;
    
        }
    
        void Texture2DtoIplImage ()
        {
                int jBackwards = imHeight;
    
                for (int v=0; v<imHeight; ++v) {
                        for (int u=0; u<imWidth; ++u) {
    
                                CvScalar col = new CvScalar ();
                                col.Val0 = (double)webcamTexture.GetPixel (u, v).b * 255;
                                col.Val1 = (double)webcamTexture.GetPixel (u, v).g * 255;
                                col.Val2 = (double)webcamTexture.GetPixel (u, v).r * 255;
    
                                jBackwards = imHeight - v - 1;
    
                                matrix.Set2D (jBackwards, u, col);
                                //matrix [jBackwards, u] = col;
                        }
                }
        }
    }
    

    最佳答案

    您可以将它们移出每帧更新循环:

    using(CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile (@"C:\Users\User\Documents\opencv\sources\data\haarcascades\haarcascade_eye.xml"))
    using(CvMemStorage storage = new CvMemStorage())
    

    没有理由在每一帧建立识别器图形。

    如果您想进行真正的速度更新,则线程化是前进的逻辑方法,团结本身不是线程化的,但是如果您小心一点,也可以折叠其他线程。

    在主线程上执行纹理-> ipl图像,然后触发一个事件以触发线程。
    该线程可以完成所有CV工作,可能会构造tex2d,然后将其推回main进行渲染。

    10-08 09:04