Hey,

I'm working on a project that stabilizes a video sequence using optical flow.

So far the optical-flow part works well, but now I have two possible routes for the rest of the work:

1. After computing the optical flow, I take the mean displacement of the image and subtract that mean from the features of the second frame. My question is: what should I do next?

2. Alternatively, I can stabilize the image with the OpenCV functions that first compute a transform matrix and then apply cvPerspectiveTransform and cvWarpPerspective, but I get an error: "bad flag".

You can see the code below. What do I need to do to stabilize the image, and which of the two approaches would you suggest?

#include <stdio.h>
#include <stdlib.h>
//#include "/usr/include/opencv/cv.h"
#include <cv.h>
#include <cvaux.h>
#include <highgui.h>
#include <math.h>
#include <iostream>

#define PI 3.1415926535898

double rads(double degs)
{
    return (PI/180 * degs);
}

CvCapture *cap;

IplImage *img;
IplImage *frame;
IplImage *frame1;
IplImage *frame3;
IplImage *frame2;
IplImage *temp_image1;
IplImage *temp_image2;
IplImage *frame1_1C;
IplImage *frame2_1C;
IplImage *eig_image;
IplImage *temp_image;
IplImage *pyramid1 = NULL;
IplImage *pyramid2 = NULL;

char * mapx;
char * mapy;

int h;
int corner_count;
CvMat* M = cvCreateMat(3,3,CV_32FC1);
CvPoint p,q,l,s;
double hypotenuse;
double angle;

int line_thickness = 1, line_valid = 1, pos = 0;
CvScalar line_color;
CvScalar target_color[4] = { // in BGR order
        {{   0,   0, 255,   0 }},  // red
        {{   0, 255,   0,   0 }},  // green
        {{ 255,   0,   0,   0 }}, // blue
        {{   0, 255, 255,   0 }}   // yellow
};

inline static double square(int a)
{
    return a * a;
}

char* IntToChar(int num)
{
    // Convert an int to a newly allocated decimal string.
    char* retstr = static_cast<char*>(calloc(12, sizeof(char)));

    if (retstr != NULL && sprintf(retstr, "%i", num) > 0)
    {
        return retstr;
    }
    else
    {
        return NULL;
    }
}

inline static void allocateOnDemand( IplImage **img, CvSize size, int depth, int channels )
{
    if ( *img != NULL )
         return;

    *img = cvCreateImage( size, depth, channels );

    if ( *img == NULL )
    {
        fprintf(stderr, "Error: Couldn't allocate image.  Out of memory?\n");
        exit(-1);
    }
}

void clearImage (IplImage *img)
{
    for (int i=0; i<img->imageSize; i++)
        img->imageData[i] = (char) 0;
}

int main()
{
    cap = cvCaptureFromCAM(0);
    //cap = cvCaptureFromAVI("/home/saif/Desktop/NAO.. the project/jj/Test3.avi");

    CvSize frame_size;

    // Reading the video's frame size
    frame_size.height = (int) cvGetCaptureProperty( cap, CV_CAP_PROP_FRAME_HEIGHT );
    frame_size.width  = (int) cvGetCaptureProperty( cap, CV_CAP_PROP_FRAME_WIDTH );
    cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);

    while(true)
    {
        frame = cvQueryFrame( cap );

        if (frame == NULL)
        {
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return -1;
        }

        // Allocating another image if it is not allocated already.
        allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );
        cvConvertImage(frame, frame1_1C, 0);
        allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
        cvConvertImage(frame, frame1, 0);

        //Get the second frame of video.
        frame = cvQueryFrame( cap );

        if (frame == NULL)
        {
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return -1;
        }

        allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
        cvConvertImage(frame, frame2_1C, 0);
        allocateOnDemand( &frame2, frame_size, IPL_DEPTH_8U, 3 );
        cvConvertImage(frame, frame2, 0);

        CvSize optical_flow_window = cvSize(5,5);
        // Allocate the scratch images once; re-creating them with
        // cvCreateImage on every iteration leaks memory.
        allocateOnDemand( &eig_image, frame_size, IPL_DEPTH_32F, 1 );
        allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );

        CvTermCriteria optical_flow_termination_criteria = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

        // Feature tracking
        CvPoint2D32f frame1_features[4];
        CvPoint2D32f frame2_features[4];

        //cvCornerEigenValsAndVecs(eig_image, temp_image, 1 );
        corner_count = 4;

        cvGoodFeaturesToTrack( frame1_1C, eig_image, temp_image, frame1_features, &corner_count, 0.1, .01, NULL, 5, 1 );
        // zero_zone = cvSize(-1,-1) disables the dead zone; a dead zone as
        // large as the search window leaves cvFindCornerSubPix nothing to fit.
        cvFindCornerSubPix( frame1_1C, frame1_features, corner_count, optical_flow_window, cvSize(-1,-1), optical_flow_termination_criteria );

        if ( corner_count <= 0 )
            printf( "\nNo features detected.\n" );
        else
            printf( "\nNumber of features found = %d\n", corner_count );

        //Lucas-Kanade method.
        char optical_flow_found_feature[20];
        float optical_flow_feature_error[20];

        allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
        allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );

        // Last parameter is the int flags argument; 0 means no special flags.
        cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2, frame1_features, frame2_features, corner_count, optical_flow_window, 5, optical_flow_found_feature, optical_flow_feature_error, optical_flow_termination_criteria, 0);

    /*
    double sumOfDistancesX = 0;
    double sumOfDistancesY = 0;

    int debug = 0;

     CvFont font1, font2;
     CvScalar red, green, blue;
     IplImage* seg_in = NULL;
     IplImage *seg_out = NULL;

     allocateOnDemand( &seg_in,  frame_size, IPL_DEPTH_8U, 3 );
     allocateOnDemand( &seg_out, frame_size, IPL_DEPTH_8U, 3 );

     clearImage(seg_in);
     clearImage(seg_out);

     for( int i=0; i <corner_count; i++ )
     {

         if ( optical_flow_found_feature[i] == 0 )
             continue;
         p.x = (int) frame1_features[i].x;
         p.y = (int) frame1_features[i].y;
         q.x = (int) frame2_features[i].x;
         q.y = (int) frame2_features[i].y;
         angle = atan2( (double) p.y - q.y, (double) p.x - q.x );

          sumOfDistancesX += q.x - p.x;
          sumOfDistancesY += q.y - p.y;

          //cvRemap(frame2,frame1,averageDistanceX , averageDistanceY,CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
      }
      */

      /*
      int averageDistanceX = sumOfDistancesX / corner_count;
      int averageDistanceY = sumOfDistancesY / corner_count;
      l.x = averageDistanceX - q.x;
      s.y = averageDistanceY - q.y;
      */

        // cvGetPerspectiveTransform needs exactly 4 point correspondences;
        // here it maps frame 2's features back onto frame 1's.
        cvGetPerspectiveTransform( frame2_features, frame1_features, M );

        // cvPerspectiveTransform and cvWarpPerspective expect CvArr* headers
        // (CvMat/IplImage), not raw CvPoint2D32f arrays -- passing the feature
        // arrays is what produced the reported "bad flag" error. Warp the
        // image itself instead:
        allocateOnDemand( &frame3, frame_size, IPL_DEPTH_8U, 3 );
        cvWarpPerspective( frame2, frame3, M, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0) );

        cvShowImage("Optical Flow", frame1);
        cvWaitKey(50);
    }

    cvReleaseCapture(&cap);
    cvReleaseMat(&M);

    return 0;
}

Best Answer

You don't want to subtract the average displacement from the second image; you want to transform (move) the second image by the average displacement so that it "matches" the first. Which "displacement" you use depends on your situation.

  • If your camera is shaking but otherwise stationary, use the average displacement between two consecutive frames as the translation vector for the second frame. With every new frame, you calculate the displacement between the transformed first frame and the new frame, and transform the new frame by it (a sketch of this follows the list).
  • If your camera moves as well as shakes (e.g. a helmet-mounted camera on a mountain bike), you first want to find the average displacement between frames across the last few frames, and then transform each individual frame by the difference between that average and its displacement from the previous frame.
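    For the first case, here is a minimal sketch of what "transform the second frame by the average displacement" could look like, reusing identifiers from the question's code (frame1_features, frame2_features, optical_flow_found_feature, corner_count, frame2, frame_size). Treat it as an illustration of the idea, not a drop-in fix:

        // Average the per-feature displacement from the LK output.
        double sumDx = 0, sumDy = 0;
        int tracked = 0;
        for (int i = 0; i < corner_count; i++)
        {
            if (optical_flow_found_feature[i] == 0)
                continue; // skip features LK failed to track
            sumDx += frame2_features[i].x - frame1_features[i].x;
            sumDy += frame2_features[i].y - frame1_features[i].y;
            tracked++;
        }

        if (tracked > 0)
        {
            double avgDx = sumDx / tracked;
            double avgDy = sumDy / tracked;

            // Shift frame2 back by the average displacement so it lines up
            // with frame1. A 2x3 matrix [1 0 tx; 0 1 ty] is a pure translation.
            CvMat* T = cvCreateMat(2, 3, CV_32FC1);
            cvmSet(T, 0, 0, 1); cvmSet(T, 0, 1, 0); cvmSet(T, 0, 2, -avgDx);
            cvmSet(T, 1, 0, 0); cvmSet(T, 1, 1, 1); cvmSet(T, 1, 2, -avgDy);

            IplImage* stabilized = cvCreateImage(frame_size, IPL_DEPTH_8U, 3);
            cvWarpAffine(frame2, stabilized, T,
                         CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));

            // ... display or store 'stabilized', then clean up.
            cvReleaseImage(&stabilized);
            cvReleaseMat(&T);
        }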

  • EDIT
    What you basically need to do for option 2 is to calculate the average of the frame-to-frame movements over the last couple of frames. You could do this in many ways, but I'd suggest using something like a Kalman filter (a simpler moving-average stand-in is sketched below). Then, for a new frame, you calculate the movement between it and the (corrected) previous frame, subtract the average movement up to that point, and move the new frame by the difference.
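    As a lightweight stand-in for the Kalman filter, one could keep an exponential moving average of the per-frame motion and treat the residual as the jitter to remove. Everything below (the names, the value of alpha) is an assumption for illustration, not part of the original answer:

        // Smooth the estimated per-frame motion; the slow-moving average tracks
        // intentional camera motion, and the residual is high-frequency shake.
        struct MotionSmoother
        {
            double avgDx, avgDy;
            double alpha; // smoothing factor; 0.9 is an assumed value

            MotionSmoother() : avgDx(0), avgDy(0), alpha(0.9) {}

            // dx, dy: raw motion between the new frame and the corrected
            // previous frame. corrDx, corrDy: the shake to warp away.
            void correct(double dx, double dy, double &corrDx, double &corrDy)
            {
                avgDx = alpha * avgDx + (1.0 - alpha) * dx;
                avgDy = alpha * avgDy + (1.0 - alpha) * dy;
                corrDx = dx - avgDx;
                corrDy = dy - avgDy;
            }
        };

    A new frame would then be shifted by (-corrDx, -corrDy), for example with the same translation warp as in the first sketch.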

    Regarding c++ image stabilization, the original question can be found on Stack Overflow: https://stackoverflow.com/questions/4247700/
