任何C++专家都可以告诉我为什么出现以下错误
我试过几次重新创建头文件。
header 是否有问题?
错误信息:'ofVideoPlayer' 中没有名为 'idleMovie' 的成员(no member named 'idleMovie' in 'ofVideoPlayer')
void testApp::setup(){
	// Capture and output resolutions (webcam and composited result).
	videoWidth   = 320;
	videoHeight  = 240;
	outputWidth  = 320;
	outputHeight = 240;

	// RGBA texture that will receive the final masked image.
	maskedImg.allocate(outputWidth, outputHeight, GL_RGBA);

	// Live webcam input.
	vidGrabber.setVerbose(true);
	vidGrabber.initGrabber(videoWidth, videoHeight);

	// QuickTime movie played behind the masked webcam feed.
	vidPlayer.loadMovie("gracht5000.mov");
	vidPlayer.play();

	// OpenCV working images: color source, its grayscale copy, the
	// captured static background, and the difference (mask) image.
	colorImg.allocate(outputWidth, outputHeight);
	grayImage.allocate(outputWidth, outputHeight);
	grayBg.allocate(outputWidth, outputHeight);
	grayDiff.allocate(outputWidth, outputHeight);

	// Capture a fresh background on the first frame; mask threshold.
	bLearnBakground = true;
	threshold = 80;
}
//--------------------------------------------------------------
void testApp::update()
{
	ofBackground(100,100,100);

	// FIX: in current openFrameworks, ofVideoPlayer::idleMovie() and
	// ofVideoGrabber::grabFrame() no longer exist — both were renamed
	// to update(). This is the cause of the reported compile error.
	vidPlayer.update();
	vidGrabber.update();

	bool bNewFrame = vidGrabber.isFrameNew();
	if (bNewFrame)
	{
		// Copy the webcam frame into the OpenCV color image, then
		// convert to grayscale for background subtraction.
		colorImg.setFromPixels(vidGrabber.getPixels(), outputWidth,outputHeight);
		grayImage = colorImg;

		// Capture a new static background when requested.
		if (bLearnBakground == true){
			grayBg = grayImage; // the = sign copys the pixels from grayImage into grayBg (operator overloading)
			bLearnBakground = false;
		}

		// Absolute difference between background and incoming frame,
		// thresholded and blurred, becomes the alpha mask.
		grayDiff.absDiff(grayBg, grayImage);
		grayDiff.threshold(threshold);
		grayDiff.blur( 3 );

		unsigned char * maskPixels  = grayDiff.getPixels(); // 1-channel mask
		unsigned char * colorPixels = colorImg.getPixels(); // 3-channel webcam RGB
		int numPixels = outputWidth * outputHeight;

		// Scratch RGBA buffer for the composited result.
		unsigned char * maskedPixels = new unsigned char[outputWidth*outputHeight*4];

		for(int i = 0; i < numPixels; i+=1 )
		{
			int basePixelRGBA = 4 * i;
			int basePixelRGB  = 3 * i;
			// RGB comes from the webcam, alpha from the mask.
			maskedPixels[ basePixelRGBA + 0 ] = colorPixels[basePixelRGB];
			maskedPixels[ basePixelRGBA + 1 ] = colorPixels[basePixelRGB+1];
			maskedPixels[ basePixelRGBA + 2 ] = colorPixels[basePixelRGB+2];
			maskedPixels[ basePixelRGBA + 3 ] = maskPixels[i];
		}

		// loadData copies the pixels into the texture, so the scratch
		// buffer can (and must) be released afterwards.
		maskedImg.loadData(maskedPixels, outputWidth,outputHeight, GL_RGBA );

		// FIX: the original leaked this buffer on every frame.
		delete[] maskedPixels;
	}
}
//--------------------------------------------------------------
void testApp::draw(){
	ofSetColor(0xffffff);

	// Background movie, full frame.
	vidPlayer.draw(0,0);

	// Masked webcam feed composited over it using its alpha channel.
	ofEnableAlphaBlending();
	maskedImg.draw(20,20);
	ofDisableAlphaBlending();

	// On-screen usage / stats text.
	ofSetColor(0xffffff);
	char reportStr[1024];
	// FIX: snprintf bounds the write to the buffer; sprintf can overflow.
	snprintf(reportStr, sizeof(reportStr),
	         "bg subtraction and blob detection\npress ' ' to capture bg\nthreshold %i (press: +/-)\n, fps: %f",
	         threshold, ofGetFrameRate());
	ofDrawBitmapString(reportStr, 20, 600);
}
头文件内容如下:
#pragma once
#include "ofMain.h"
#include "ofxOpenCv.h"
// Application class: webcam background-subtraction masking demo.
// NOTE(review): the implementation file defines its methods on `testApp`,
// but this header declares the class as `ofApp`. The two names must match
// (rename one of them), otherwise the compiler reports errors such as
// "no member named 'setup' in ..." — confirm which name main.cpp uses.
class ofApp : public ofBaseApp{
public:
// Standard openFrameworks lifecycle callbacks.
void setup();
void update();
void draw();
// Input / window event callbacks (implementations not shown here).
void keyPressed(int key);
void keyReleased(int key);
void mouseMoved(int x, int y );
void mouseDragged(int x, int y, int button);
void mousePressed(int x, int y, int button);
void mouseReleased(int x, int y, int button);
void windowResized(int w, int h);
void dragEvent(ofDragInfo dragInfo);
void gotMessage(ofMessage msg);
// Capture resolution of the webcam.
int videoWidth;
int videoHeight;
// Resolution of the composited output image.
int outputWidth;
int outputHeight;
// Cutoff applied to the difference image to build the mask.
int threshold;
ofVideoGrabber vidGrabber;   // live webcam input
ofVideoPlayer vidPlayer;     // background movie
ofxCvColorImage colorImg;    // current webcam frame (RGB)
ofxCvGrayscaleImage grayImage; // grayscale copy of colorImg
ofxCvGrayscaleImage grayBg;    // captured static background
ofxCvGrayscaleImage grayDiff;  // |frame - background|, thresholded mask
ofTexture maskedImg;         // final RGBA masked image
// When true, the next frame is stored as the new background.
bool bLearnBakground;
};
最佳答案
这两个函数在新版 openFrameworks 中已被移除:把 vidPlayer.idleMovie(); 替换为 vidPlayer.update();,并把 vidGrabber.grabFrame(); 替换为 vidGrabber.update(); 即可解决该编译错误。