(1) Tracking类中的 mpORBextractorLeft
// Tracking's mpORBextractorLeft is created during Tracking's initialization:
// a single (left) extractor is all the monocular pipeline needs.
mpORBextractorLeft = new ORBextractor(nFeatures,fScaleFactor,nLevels,fIniThFAST,fMinThFAST);
// Tracking::GrabImageMonocular constructs mCurrentFrame from the grayscale image mImGray,
// the ORB extractor, the vocabulary and the calibration (K, distortion, baseline, depth
// threshold), so every sensor type goes through the same Frame interface.
mCurrentFrame = Frame(mImGray,timestamp,mpORBextractorLeft,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth);
// The system then enters the main tracking routine.
Track();
(2) Frame类中的mpORBextractorLeft
// Frame constructor (monocular overload): the initializer list stores the
// extractor, vocabulary and calibration (K and the distortion coefficients are
// deep-copied via clone()); the right extractor is null for monocular frames.
// Body elided here ("...") — only the ORB-extraction call is shown.
Frame::Frame(const cv::Mat &imGray, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth) :mpORBvocabulary(voc),mpORBextractorLeft(extractor),mpORBextractorRight(static_cast<ORBextractor*>(NULL)), mTimeStamp(timeStamp), mK(K.clone()),mDistCoef(distCoef.clone()), mbf(bf), mThDepth(thDepth)
{
...
// flag 0 selects the left extractor — a monocular frame only has the left image.
ExtractORB(0,imGray);
...
}
// Runs ORB feature extraction on `im` through the extractor's overloaded
// operator() (ORBextractor is used as a functor).
//   flag == 0 -> left image:  fills mvKeys / mDescriptors
//   flag != 0 -> right image: fills mvKeysRight / mDescriptorsRight
// FIX: in the collapsed one-line original, the trailing `//` comment placed
// before `else` commented out the entire right-image branch; the function is
// reformatted so both branches are live again.
void Frame::ExtractORB(int flag, const cv::Mat &im)
{
    if(flag==0)
        (*mpORBextractorLeft)(im,cv::Mat(),mvKeys,mDescriptors);   // overloaded operator()
    else
        (*mpORBextractorRight)(im,cv::Mat(),mvKeysRight,mDescriptorsRight);
}
(3) ORBextractor类中成员
成员变量:
// ORBextractor data members (all initialized/filled by the constructor below).
std::vector<cv::Point> pattern;        // 512-point sampling pattern copied from bit_pattern_31_
int nfeatures;                         // total number of features to extract (ctor arg)
double scaleFactor;                    // scale ratio between consecutive pyramid levels (ctor arg)
int nlevels;                           // number of pyramid levels (ctor arg)
int iniThFAST;                         // initial FAST threshold (ctor arg)
int minThFAST;                         // minimum FAST threshold (ctor arg)
std::vector<int> mnFeaturesPerLevel;   // per-level feature budget (geometric split of nfeatures)
std::vector<int> umax;                 // per-row max column of the circular orientation patch
std::vector<float> mvScaleFactor;      // cumulative scale per level (level 0 = 1)
std::vector<float> mvInvScaleFactor;   // 1 / mvScaleFactor
std::vector<float> mvLevelSigma2;      // mvScaleFactor squared
std::vector<float> mvInvLevelSigma2;   // 1 / mvLevelSigma2
ORBextractor的构造函数:
// Constructs the ORB extractor: precomputes per-level scale factors and their
// inverses, distributes the feature budget across pyramid levels, copies the
// BRIEF sampling pattern, and precomputes the circular-patch row bounds (umax)
// used later for keypoint orientation.
ORBextractor::ORBextractor(int _nfeatures, float _scaleFactor, int _nlevels, int _iniThFAST, int _minThFAST):
    nfeatures(_nfeatures), scaleFactor(_scaleFactor), nlevels(_nlevels),
    iniThFAST(_iniThFAST), minThFAST(_minThFAST)
{
    // Per-level scale and its square (sigma^2); level 0 is the original image.
    mvScaleFactor.resize(nlevels);
    mvLevelSigma2.resize(nlevels);
    mvScaleFactor[0]=1.0f;
    mvLevelSigma2[0]=1.0f;
    for(int lvl=1; lvl<nlevels; lvl++)
    {
        mvScaleFactor[lvl]=mvScaleFactor[lvl-1]*scaleFactor;
        mvLevelSigma2[lvl]=mvScaleFactor[lvl]*mvScaleFactor[lvl];
    }

    // Cache the inverses so later code can multiply instead of divide.
    mvInvScaleFactor.resize(nlevels);
    mvInvLevelSigma2.resize(nlevels);
    for(int lvl=0; lvl<nlevels; lvl++)
    {
        mvInvScaleFactor[lvl]=1.0f/mvScaleFactor[lvl];
        mvInvLevelSigma2[lvl]=1.0f/mvLevelSigma2[lvl];
    }

    mvImagePyramid.resize(nlevels);

    // Split the total feature budget across levels as a geometric series;
    // the last level absorbs whatever rounding left over.
    mnFeaturesPerLevel.resize(nlevels);
    float factor = 1.0f / scaleFactor;
    float nDesiredFeaturesPerScale = nfeatures*(1 - factor)/(1 - (float)pow((double)factor, (double)nlevels));
    int sumFeatures = 0;
    for( int level = 0; level < nlevels-1; level++ )
    {
        mnFeaturesPerLevel[level] = cvRound(nDesiredFeaturesPerScale);
        sumFeatures += mnFeaturesPerLevel[level];
        nDesiredFeaturesPerScale *= factor;
    }
    mnFeaturesPerLevel[nlevels-1] = std::max(nfeatures - sumFeatures, 0);

    // Copy the 512-point sampling pattern used by the descriptor.
    const int npoints = 512;
    const Point* pattern0 = (const Point*)bit_pattern_31_;
    std::copy(pattern0, pattern0 + npoints, std::back_inserter(pattern));

    // This is for orientation:
    // pre-compute, for each row v of the circular patch, the last valid
    // column umax[v] of that row.
    umax.resize(HALF_PATCH_SIZE + 1);
    int v, v0, vmax = cvFloor(HALF_PATCH_SIZE * sqrt(2.f) / 2 + 1);
    int vmin = cvCeil(HALF_PATCH_SIZE * sqrt(2.f) / 2);
    const double hp2 = HALF_PATCH_SIZE*HALF_PATCH_SIZE;
    for (v = 0; v <= vmax; ++v)
        umax[v] = cvRound(sqrt(hp2 - v * v));

    // Make sure the circle is symmetric (mirror the lower octant upward).
    for (v = HALF_PATCH_SIZE, v0 = 0; v >= vmin; --v)
    {
        while (umax[v0] == umax[v0 + 1])
            ++v0;
        umax[v] = v0;
        ++v0;
    }
}