I have created a neural network with the CvANN_MLP class, using version 2.4.6 of the OpenCV library. My CvANN_MLP network is:
Mat trainingData(NUMERO_ESEMPI_TOTALE, 59, CV_32FC1);
Mat trainingClasses(NUMERO_ESEMPI_TOTALE, 1, CV_32FC1);
// Copy the 59 features of each training sample; the label is stored in column 59.
for (int i = 0; i < NUMERO_ESEMPI_TOTALE; i++) {
    for (int j = 0; j < 59; j++) {
        trainingData.at<float>(i, j) = featureVect[i][j];
    }
}
for (int i = 0; i < NUMERO_ESEMPI_TOTALE; i++) {
    trainingClasses.at<float>(i, 0) = featureVect[i][59];
}
Mat testData(NUMERO_ESEMPI_TEST, 59, CV_32FC1);
Mat testClasses(NUMERO_ESEMPI_TEST, 1, CV_32FC1);
for (int i = 0; i < NUMERO_ESEMPI_TEST; i++) {
    for (int j = 0; j < 59; j++) {
        testData.at<float>(i, j) = featureVectTest[i][j];
    }
}
// Expected labels: 0 = mouth, 1 = not mouth.
testClasses.at<float>(0,0) = 1;
testClasses.at<float>(1,0) = 0;
testClasses.at<float>(2,0) = 1;
testClasses.at<float>(3,0) = 1;
testClasses.at<float>(4,0) = 0;
testClasses.at<float>(5,0) = 1;
testClasses.at<float>(6,0) = 0;
testClasses.at<float>(7,0) = 1;
testClasses.at<float>(8,0) = 1;
testClasses.at<float>(9,0) = 0;
testClasses.at<float>(10,0) = 0;
testClasses.at<float>(11,0) = 1;
testClasses.at<float>(12,0) = 0;
testClasses.at<float>(13,0) = 0;
testClasses.at<float>(14,0) = 0;
testClasses.at<float>(15,0) = 0;
testClasses.at<float>(16,0) = 0;
testClasses.at<float>(17,0) = 0;
testClasses.at<float>(18,0) = 0;
testClasses.at<float>(19,0) = 1;
testClasses.at<float>(20,0) = 1;
testClasses.at<float>(21,0) = 0;
testClasses.at<float>(22,0) = 1;
testClasses.at<float>(23,0) = 0;
testClasses.at<float>(24,0) = 1;
testClasses.at<float>(25,0) = 0;
testClasses.at<float>(26,0) = 0;
testClasses.at<float>(27,0) = 1;
testClasses.at<float>(28,0) = 1;
testClasses.at<float>(29,0) = 1;
Mat layers = Mat(3, 1, CV_32SC1);
layers.row(0) = Scalar(59); // input layer: 59 features
layers.row(1) = Scalar(3);  // one hidden layer with 3 neurons
layers.row(2) = Scalar(1);  // output layer: 1 value
CvANN_MLP mlp;
CvANN_MLP_TrainParams params;
CvTermCriteria criteria;
criteria.max_iter = 100;
criteria.epsilon = 0.0000001;
criteria.type = CV_TERMCRIT_ITER | CV_TERMCRIT_EPS;
params.train_method = CvANN_MLP_TrainParams::BACKPROP;
params.bp_dw_scale = 0.05;
params.bp_moment_scale = 0.05;
params.term_crit = criteria;
mlp.create(layers);
// train
mlp.train(trainingData, trainingClasses, Mat(), Mat(), params);
Mat predicted(testClasses.rows, 1, CV_32F);
Mat pred(NUMERO_ESEMPI_TEST, 1, CV_32FC1);
Mat pred1(NUMERO_ESEMPI_TEST, 1, CV_32FC1);
for (int i = 0; i < testData.rows; i++) {
    Mat response(1, 1, CV_32FC1);
    Mat sample = testData.row(i);
    mlp.predict(sample, response);
    predicted.at<float>(i, 0) = response.at<float>(0, 0);
    pred.at<float>(i, 0) = predicted.at<float>(i, 0);
    pred1.at<float>(i, 0) = predicted.at<float>(i, 0);
    file << "Value Image " << i << ": " << predicted.at<float>(i, 0) << "\n";
    //cout << "Value Image " << i << ": " << predicted.at<float>(i, 0) << endl;
}
The problem is that the network returns the same result for every test sample, and I don't know why. My network takes as input feature vectors with 59 input values and 1 output value.
Can you help me?
Best answer
I had a similar problem. It is caused by the default parameters of the mlp's create function: with them the sigmoid activation function is not set up correctly, training fails in that case, and you get the same result for every sample. The solution is to call the create function as follows:
mlp.create(layers, CvANN_MLP::SIGMOID_SYM, 1, 1);
My question is here: OpenCV Neural Network Sigmoid Output
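For reference, a minimal sketch of the corrected setup, assuming OpenCV 2.4.x and the layers, params, trainingData, trainingClasses and testData built exactly as in the question (the two 1 arguments are the fparam1/fparam2 values from the fix above; the 0.5 threshold is only a hypothetical way to map the output back to the 0/1 labels):
// Set the symmetric sigmoid activation explicitly instead of relying on the
// defaults of create(); the rest of the pipeline stays unchanged.
mlp.create(layers, CvANN_MLP::SIGMOID_SYM, 1, 1);
mlp.train(trainingData, trainingClasses, Mat(), Mat(), params);
// Predict one test sample; the raw output should now vary between samples.
Mat response(1, 1, CV_32FC1);
mlp.predict(testData.row(0), response);
float value = response.at<float>(0, 0);
int label = (value > 0.5f) ? 1 : 0; // hypothetical threshold, adjust to your label encoding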