System: CentOS 7.4
I: OpenVINO installation
Reference: https://docs.openvinotoolkit.org/latest/_docs_install_guides_installing_openvino_linux.html
II: Model Optimizer for a TensorFlow model with OpenVINO (SSD part), reference:
https://www.cnblogs.com/fourmi/p/10888513.html
Working directory: /opt/intel/openvino/deployment_tools/model_optimizer
Command:
python3.6 mo_tf.py --input_model=/home/gsj/object-detection/test_models/ssd_inception_v2_coco_2018_01_28/frozen_inference_graph.pb --tensorflow_use_custom_operations_config /opt/intel/openvino_2019.2.201/deployment_tools/model_optimizer/extensions/front/tf/ssd_v2_support.json --tensorflow_object_detection_api_pipeline_config /home/gsj/object-detection/test_models/ssd_inception_v2_coco_2018_01_28/pipeline.config --reverse_input_channels --batch 32
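mo_tf.py writes the IR into the current working directory unless --output_dir is given, so with the command above the frozen_inference_graph.xml/.bin pair ends up in the model_optimizer directory, which is also where the test in section V loads it from. A quick sanity check (the ls call below is only an illustration):
ls /opt/intel/openvino/deployment_tools/model_optimizer/frozen_inference_graph.*
# expected: frozen_inference_graph.xml, frozen_inference_graph.bin and a .mapping file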
III: Modified sample directory:
/opt/intel/openvino/inference_engine/samples/self_object_detection
Contains the following files (a setup sketch follows the list):
main.cpp self_object_detection_engine_head.h
CMakeLists.txt README.md self_object_detection.h
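One way to lay out this directory (a sketch under the assumption that you start from the stock object_detection_sample_ssd sample shipped with OpenVINO; the commands are illustrative rather than taken from the original setup):
cd /opt/intel/openvino/inference_engine/samples
cp -r object_detection_sample_ssd self_object_detection
# replace main.cpp with the version below, add self_object_detection_engine_head.h and
# self_object_detection.h, and rename the CMake target to self_object_detection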
1. main.cpp
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/*******************************************************************
* Copyright:2019-2030, CC
* File Name: main.cpp
* Description: main function; includes ObjectDetection,
* Initialize_Check, readInputimagesNames,
* load_inference_engine, readIRfiles,
* prepare_input_blobs, load_and_create_request,
* prepare_input, process
*Author: Gao Shengjun
*Date: 2019-07-19
*******************************************************************/
#include <gflags/gflags.h>
#include <iostream>
#include <string>
#include <memory>
#include <vector>
#include <algorithm>
#include <map>
#include <format_reader_ptr.h>
#include <inference_engine.hpp>
#include <ext_list.hpp>
#include <samples/common.hpp>
#include <samples/slog.hpp>
#include <samples/args_helper.hpp>
#include <vpu/vpu_tools_common.hpp>
#include <vpu/vpu_plugin_config.hpp>

#include "self_object_detection_engine_head.h"

bool ParseAndCheckCommandLine(int argc, char *argv[]) {
gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true);
if (FLAGS_h) {
showUsage();
showAvailableDevices();
return false;
    }
    slog::info << "Parsing input parameters" << slog::endl;
    if (FLAGS_i.empty()) {
        throw std::logic_error("Parameter -i is not set");
    }
    if (FLAGS_m.empty()) {
        throw std::logic_error("Parameter -m is not set");
    }
    return true;
}

static std::map<std::string, std::string> configure(const std::string& confFileName) {
    auto config = parseConfig(confFileName);
    return config;
}

void Initialize_Check_params(int argc, char* argv[]) {
slog::info << "InferenceEngine: " << GetInferenceEngineVersion()<<"\n";
if(!ParseAndCheckCommandLine(argc,argv)){
slog::info<<"Check successfully"<<"\n";
return ;
}
return;
}

std::vector<std::string> readInputimagesNames() {
    std::vector<std::string> images;
parseInputFilesArguments(images);
if(images.empty()) throw std::logic_error("No suitable images were found");
return images;
}

void load_inference_engine(Core &ie) {
slog::info <<"Loading Inference Engine"<<slog::endl;
slog::info <<"Device info:" <<slog::endl;
std::cout<<ie.GetVersions(FLAGS_d);
if(FLAGS_p_msg){
ie.SetLogCallback(error_listener);
}
if (FLAGS_d.find("CPU")!=std::string::npos){
ie.AddExtension(std::make_shared<Extensions::Cpu::CpuExtensions>(),"CPU");
}
if(!FLAGS_l.empty()){
IExtensionPtr extension_ptr = make_so_pointer<IExtension>(FLAGS_l);
ie.AddExtension(extension_ptr,"CPU");
slog::info <<"CPU Extension loaded: "<<FLAGS_l<<slog::endl;
}
if(!FLAGS_c.empty()){
ie.SetConfig({{PluginConfigParams::KEY_CONFIG_FILE,FLAGS_c}},"GPU");
slog::info<<"GPU Extension loaded: "<<FLAGS_c<<slog::endl;
    }
}

struct NetworkReader_networkinfo readIRfiles() {
struct NetworkReader_networkinfo nettools;
std::string binFileName = fileNameNoExt(FLAGS_m) +".bin";
slog::info << "Loading network files:"
"\n\t" << FLAGS_m <<
"\n\t" << binFileName <<
slog::endl;
CNNNetReader networkReader;
networkReader.ReadNetwork(FLAGS_m);
networkReader.ReadWeights(binFileName);
CNNNetwork network = networkReader.getNetwork();
nettools.networkReader = networkReader;
nettools.network = network;
return nettools;
}

struct inputInfo_imageName prepare_input_blobs(CNNNetwork &network, CNNNetReader &networkReader, InputsDataMap &inputsInfo) {
slog::info << "Preparing input blobs" << slog::endl;
struct inputInfo_imageName res;
    if (inputsInfo.size() != 1 && inputsInfo.size() != 2) throw std::logic_error("Sample supports topologies only with 1 or 2 inputs");
std::string imageInputName,imInfoInputName;
InputInfo::Ptr inputInfo = nullptr;
SizeVector inputImageDims;
for(auto &item:inputsInfo){
        if (item.second->getInputData()->getTensorDesc().getDims().size() == 4) {
imageInputName = item.first;
inputInfo = item.second;
slog::info<<"Batch size is "<<std::to_string(networkReader.getNetwork().getBatchSize())<<slog::endl;
Precision inputPrecision =Precision::U8;
item.second->setPrecision(inputPrecision);
        } else if (item.second->getInputData()->getTensorDesc().getDims().size() == 2) {
imInfoInputName = item.first;
Precision inputPrecision = Precision::FP32;
item.second->setPrecision(inputPrecision);
            if ((item.second->getTensorDesc().getDims()[1] != 3 && item.second->getTensorDesc().getDims()[1] != 6)) {
throw std::logic_error("Invalid input info. Should be 3 or 6 values length");
}
}
}
if(inputInfo == nullptr){
inputInfo = inputsInfo.begin()->second;
}
res.inputInfo = inputInfo;
res.InputName = imageInputName;
res.imInfoInputName=imInfoInputName;
return res;
}

struct outputInfoStruct prepare_output_blobs(CNNNetwork &network) {
struct outputInfoStruct res_output;
slog::info << "Preparing output blobs" << slog::endl;
OutputsDataMap outputsInfo(network.getOutputsInfo());
std::string outputName;
DataPtr outputInfo;
for(const auto& out : outputsInfo){
if(out.second->getCreatorLayer().lock()->type=="DetectionOutput"){
outputName = out.first;
outputInfo = out.second;
}
}
if(outputInfo == nullptr){
throw std::logic_error("Can't find a DetectionOutput layers in the topology");
}
const SizeVector outputDims = outputInfo->getTensorDesc().getDims();
    res_output.maxProposalCount = outputDims[2];
    res_output.objectSize = outputDims[3];
    res_output.outputName = outputName;
    if (res_output.objectSize != 7) {
        throw std::logic_error("Output item should have 7 as a last dimension");
    }
    if (outputDims.size() != 4) {
        throw std::logic_error("Incorrect output dimensions for SSD model");
    }
    outputInfo->setPrecision(Precision::FP32);
return res_output;
}

struct exenet_requests load_and_create_request(CNNNetwork& network, Core &ie) {
struct exenet_requests res;
slog::info << "Loading model to the device" << slog::endl;
res.executable_network = ie.LoadNetwork(network, FLAGS_d, configure(FLAGS_config));
slog::info << "Create infer request" << slog::endl;
res.infer_request = res.executable_network.CreateInferRequest();
return res;
}

struct res_outputStruct prepare_input(std::vector<std::string>& images, CNNNetwork& network, struct inputInfo_imageName& res, InferRequest& infer_request, InputsDataMap& inputsInfo) {
struct res_outputStruct output_res2;
std::vector<std::shared_ptr<unsigned char>> imageData,originalImagesData;
std::vector<size_t>imageWidths,imageHeights;
for(auto &i : images){
FormatReader::ReaderPtr reader(i.c_str());
if(reader.get()==nullptr){
            slog::warn << "Image " + i + " cannot be read!" << slog::endl;
continue;
}
std::shared_ptr<unsigned char>originalData(reader->getData());
        std::shared_ptr<unsigned char> data(reader->getData(res.inputInfo->getTensorDesc().getDims()[3], res.inputInfo->getTensorDesc().getDims()[2]));
if(data.get()!=nullptr){
originalImagesData.push_back(originalData);
imageData.push_back(data);
imageWidths.push_back(reader->width());
            imageHeights.push_back(reader->height());
        }
    }
if(imageData.empty())throw std::logic_error("Valid input images were not found!");
size_t batchSize = network.getBatchSize();
slog::info << "Batch Size is "<<std::to_string(batchSize)<<slog::endl;
if(batchSize!=imageData.size()){
        slog::warn << "Number of images " + std::to_string(imageData.size()) + \
            " doesn't match batch size " + std::to_string(batchSize) << slog::endl;
batchSize = std::min(batchSize,imageData.size());
slog::warn <<"Number of images to be processed is "<<std::to_string(batchSize)<<slog::endl;
}
Blob::Ptr imageInput = infer_request.GetBlob(res.InputName);
    size_t num_channels = imageInput->getTensorDesc().getDims()[1];
    size_t image_size = imageInput->getTensorDesc().getDims()[3] * imageInput->getTensorDesc().getDims()[2];
unsigned char* data = static_cast<unsigned char*>(imageInput->buffer());
    for (size_t image_id = 0; image_id < std::min(imageData.size(), batchSize); ++image_id) {
        for (size_t pid = 0; pid < image_size; pid++) {
            for (size_t ch = 0; ch < num_channels; ++ch) {
data[image_id*image_size*num_channels + ch*image_size+pid] = imageData.at(image_id).get()[pid*num_channels + ch];
}
}
}
if(res.imInfoInputName!=""){
Blob::Ptr input2 = infer_request.GetBlob(res.imInfoInputName);
        auto imInfoDim = inputsInfo.find(res.imInfoInputName)->second->getTensorDesc().getDims()[1];
float *p = input2->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();
        for (size_t image_id = 0; image_id < std::min(imageData.size(), batchSize); ++image_id) {
            p[image_id * imInfoDim + 0] = static_cast<float>(inputsInfo[res.InputName]->getTensorDesc().getDims()[2]);
            p[image_id * imInfoDim + 1] = static_cast<float>(inputsInfo[res.InputName]->getTensorDesc().getDims()[3]);
            for (size_t k = 2; k < imInfoDim; ++k) {
                p[image_id * imInfoDim + k] = 1.0f;
}
}
}
output_res2.originalImagesData=originalImagesData;
output_res2.imageWidths=imageWidths;
output_res2.imageHeights=imageHeights;
output_res2.batchSize=batchSize;
slog::info<<"Start inference"<<slog::endl;
infer_request.Infer();
return output_res2;
}

void process(InferRequest& infer_request, std::string& outputName, size_t& batchSize, const int& maxProposalCount, const int& objectSize, std::vector<size_t>& imageWidths, std::vector<size_t>& imageHeights, std::vector<std::shared_ptr<unsigned char>>& originalImagesData) {
slog::info << "Processing output blobs" <<slog::endl;
const Blob::Ptr output_blob = infer_request.GetBlob(outputName);
const float* detection = static_cast<PrecisionTrait<Precision::FP32>::value_type*>(output_blob->buffer());
std::vector<std::vector<int>>boxes(batchSize);
std::vector<std::vector<int>>classes(batchSize);
    std::cout << imageWidths[0] << "--" << imageHeights[0] << " " << detection[0] << std::endl;
    for (int curProposal = 0; curProposal < maxProposalCount; curProposal++) {
        auto image_id = static_cast<int>(detection[curProposal * objectSize + 0]);
        if (image_id < 0) { break; }
        float confidence = detection[curProposal * objectSize + 2];
        auto label = static_cast<int>(detection[curProposal * objectSize + 1]);
        auto xmin = static_cast<int>(detection[curProposal * objectSize + 3] * imageWidths[image_id]);
        auto ymin = static_cast<int>(detection[curProposal * objectSize + 4] * imageHeights[image_id]);
        auto xmax = static_cast<int>(detection[curProposal * objectSize + 5] * imageWidths[image_id]);
        auto ymax = static_cast<int>(detection[curProposal * objectSize + 6] * imageHeights[image_id]);
std::cout << "[" << curProposal << "," << label << "] element, prob = " << confidence <<
" (" << xmin << "," << ymin << ")-(" << xmax << "," << ymax << ")" << " batch id : " << image_id;
if(confidence > 0.3){
classes[image_id].push_back(label);
boxes[image_id].push_back(xmin);
boxes[image_id].push_back(ymin);
boxes[image_id].push_back(xmax - xmin);
boxes[image_id].push_back(ymax - ymin);
std::cout << " WILL BE PRINTED!";
}
std::cout<<std::endl;
}
    for (size_t batch_id = 0; batch_id < batchSize; ++batch_id) {
addRectangles(originalImagesData[batch_id].get(),imageHeights[batch_id],imageWidths[batch_id],boxes[batch_id],classes[batch_id],BBOX_THICKNESS);
const std::string image_path = "out_" + std::to_string(batch_id) + ".bmp";
if (writeOutputBmp(image_path, originalImagesData[batch_id].get(), imageHeights[batch_id], imageWidths[batch_id])) {
slog::info << "Image " + image_path + " created!" << slog::endl;
} else {
throw std::logic_error(std::string("Can't create a file: ") + image_path);
        }
    }
}

/****************************************MAIN***************************************************/
int main(int argc, char *argv[]) {
try {
/** This sample covers certain topology and cannot be generalized for any object detection one **/
// --------------------------- 1. Parsing and validation of input args ---------------------------------
Initialize_Check_params(argc,argv);
// --------------------------- 2. Read input -----------------------------------------------------------
/** This vector stores paths to the processed images **/
std::vector<std::string> images = readInputimagesNames();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- 3. Load inference engine -------------------------------------
Core ie;
        load_inference_engine(ie);

        // --------------------------- 4. Read IR Generated by ModelOptimizer (.xml and .bin files) ------------
        struct NetworkReader_networkinfo nettools = readIRfiles();
        CNNNetwork network = nettools.network;
        CNNNetReader networkReader = nettools.networkReader;
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- 5. Prepare input blobs --------------------------------------------------
/** Taking information about all topology inputs **/
        InputsDataMap inputsInfo(network.getInputsInfo());
        struct inputInfo_imageName res = prepare_input_blobs(network, networkReader, inputsInfo);
InputInfo::Ptr inputInfo = res.inputInfo;
std::string imageInputName = res.InputName;
        std::string imInfoInputName = res.imInfoInputName;
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- 6. Prepare output blobs -------------------------------------------------
struct outputInfoStruct res_output = prepare_output_blobs(network);
const int maxProposalCount = res_output.maxProposalCount;
const int objectSize =res_output.objectSize;
        std::string outputName = res_output.outputName;
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- 7. Loading model to the device ------------------------------------------
struct exenet_requests exe_req = load_and_create_request(network,ie);
ExecutableNetwork executable_network = exe_req.executable_network;
InferRequest infer_request = exe_req.infer_request;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 8. Prepare input --------------------------------------------------------
struct res_outputStruct out_struct =prepare_input(images,network,res,infer_request,inputsInfo);
std::vector<std::shared_ptr<unsigned char>>originalImagesData=out_struct.originalImagesData;
std::vector<size_t>imageWidths=out_struct.imageWidths;
std::vector<size_t>imageHeights=out_struct.imageHeights;
size_t batchSize = out_struct.batchSize;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 9. Process output -------------------------------------------------------
process(infer_request, outputName, batchSize, maxProposalCount, objectSize, imageWidths,imageHeights,originalImagesData);
// -----------------------------------------------------------------------------------------------------
}
catch (const std::exception& error) {
slog::err << error.what() << slog::endl;
        return 1;
}
catch (...) {
slog::err << "Unknown/internal exception happened." << slog::endl;
        return 1;
    }
    slog::info << "Execution successful" << slog::endl;
slog::info << slog::endl << "This sample is an API example, for any performance measurements "
"please use the dedicated benchmark_app tool" << slog::endl;
    return 0;
}
2. self_object_detection_engine_head.h
/*******************************************************************
* Copyright:2019-2030, CC
* File Name: self_object_detection_engine_head.h
* Description: main function includes ObjectDetection,
* Initialize_Check, readInputimagesNames,
* load_inference_engine, readIRfiles,
* prepare_input_blobs, load_and_create_request,
* prepare_input, process
*Author: Gao Shengjun
*Date: 2019-07-19
*******************************************************************/
#ifndef SELF_OBJECT_DETECTION_ENGINE_HEAD_H
#define SELF_OBJECT_DETECTION_ENGINE_HEAD_H

#include "self_object_detection.h"
#include <gflags/gflags.h>
#include <iostream>
#include <string>
#include <memory>
#include <vector>
#include <algorithm>
#include <map>
#include <format_reader_ptr.h>
#include <inference_engine.hpp>
#include <ext_list.hpp>
#include <samples/common.hpp>
#include <samples/slog.hpp>
#include <samples/args_helper.hpp>
#include <vpu/vpu_tools_common.hpp>
#include <vpu/vpu_plugin_config.hpp>
using namespace InferenceEngine;
ConsoleErrorListener error_listener;

typedef struct NetworkReader_networkinfo {
CNNNetReader networkReader;
CNNNetwork network;
} NetworkReader_networkinfo;

typedef struct inputInfo_imageName {
InputInfo::Ptr inputInfo;
std::string InputName;
    std::string imInfoInputName;
} inputInfo_imageName;

typedef struct outputInfoStruct {
int maxProposalCount ;
int objectSize;
std::string outputName;
} outputInfoStruct;

typedef struct exenet_requests {
ExecutableNetwork executable_network;
    InferRequest infer_request;
} exenet_requests;

typedef struct res_outputStruct {
std::vector<std::shared_ptr<unsigned char>>originalImagesData;
std::vector<size_t>imageWidths;
std::vector<size_t>imageHeights;
    size_t batchSize;
} res_outputStruct;

#ifdef __cplusplus
extern "C"
{
#endif

/**
 * @brief Get the version of the InferenceEngine and parse the input params; print the help message if needed, and check the required input params, e.g. the model file and the input images are both required.
 * @ param[in] argc: the number of the input params,
 *             argv: the vector to store the input params
 * @ param[out] None
 */
void Initialize_Check_params(int argc, char* argv[]);

/**
 * @brief Get the input image filenames and store them in a vector
 * @ param[in] None
 * @ param[out] return the filenames of the images
 * @ Parses the info of the input images by calling "parseInputFilesArguments" built into OpenVINO
 */
std::vector<std::string> readInputimagesNames();

/**
 * @brief Load the extension/plugin according to the specified device, e.g. CPU, GPU
 * @ param[in] Core &ie
 * @ param[out] None
 */
void load_inference_engine(Core& ie);

/**
 * @brief Read the proto and weights files of the network produced by the OpenVINO Model Optimizer tools
 * @ param[in] FLAGS_m: the model files *.bin, *.xml
 * @ param[out] struct containing the NetworkReader and the structure info of the model
 */
struct NetworkReader_networkinfo readIRfiles();

/*
 * @brief Get the detailed InputsInfo for the blob format and set the precision of the inputs according to the input format, e.g. [NCHW]:U8, [HW]:FP32
 * @ param[in] network, CNNNetReader, inputsInfo
 * @ param[out] struct containing the inputInfo, the image input name and the image-info input name
 */
struct inputInfo_imageName prepare_input_blobs(CNNNetwork &network, CNNNetReader &networkReader, InputsDataMap &inputsInfo);

/*
 * @brief Get the info of the outputs from the network by calling OpenVINO's "getOutputsInfo()"
 * @ param[in] network
 * @ param[out] struct containing the model's maxProposalCount, objectSize, outputName
 */
struct outputInfoStruct prepare_output_blobs(CNNNetwork &network);

/*
 * @brief Load the network to the device and create the infer request
 * @ param[in] network, Core
 * @ param[out] struct containing the executable network and the InferRequest
 */
struct exenet_requests load_and_create_request(CNNNetwork& network, Core &ie);

/*
 * @brief Read the input images, fill the input blob and start the infer request
 * @ param[in] images: the paths of the images, network: the info of the model, inputInfo_imageName res, infer_request, inputsInfo
 * @ param[out] return the data of the input images and their heights, widths, batchSize
 */
struct res_outputStruct prepare_input(std::vector<std::string>& images, CNNNetwork& network, struct inputInfo_imageName& res, InferRequest& infer_request, InputsDataMap& inputsInfo);

/*
 * @brief Parse the output blob and get the (label, xmin, ymin, xmax, ymax) of the detection boxes; a confidence threshold of 0.3 gives better results here
 * @param [in] infer_request, outputName, batchSize, maxProposalCount, objectSize, imageWidths, imageHeights, originalImagesData
 * @param [out] None
 */
void process(InferRequest& infer_request, std::string& outputName, size_t& batchSize, const int& maxProposalCount, const int& objectSize, std::vector<size_t>& imageWidths, std::vector<size_t>& imageHeights, std::vector<std::shared_ptr<unsigned char>>& originalImagesData);
#ifdef __cplusplus
}
#endif
#endif
3. self_object_detection.h
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <string>
#include <vector>
#include <gflags/gflags.h>
#include <iostream>

/* thickness of a line (in pixels) to be used for bounding boxes */
#define BBOX_THICKNESS 2

/// @brief message for help argument
static const char help_message[] = "Print a usage message.";
/// @brief message for images argument
static const char image_message[] = "Required. Path to an .bmp image.";
/// @brief message for model argument
static const char model_message[] = "Required. Path to an .xml file with a trained model.";
/// @brief message for plugin argument
static const char plugin_message[] = "Plugin name. For example MKLDNNPlugin. If this parameter is pointed, " \
    "the sample will look for this plugin only";
/// @brief message for assigning cnn calculation to device
static const char target_device_message[] = "Optional. Specify the target device to infer on (the list of available devices is shown below). " \
    "Default value is CPU. Use \"-d HETERO:<comma-separated_devices_list>\" format to specify HETERO plugin. " \
    "Sample will look for a suitable plugin for device specified";
/// @brief message for clDNN custom kernels desc
static const char custom_cldnn_message[] = "Required for GPU custom kernels. "\
    "Absolute path to the .xml file with the kernels descriptions.";
/// @brief message for user library argument
static const char custom_cpu_library_message[] = "Required for CPU custom layers. " \
    "Absolute path to a shared library with the kernels implementations.";
/// @brief message for plugin messages
static const char plugin_err_message[] = "Optional. Enables messages from a plugin";
/// @brief message for config argument
static constexpr char config_message[] = "Path to the configuration file. Default value: \"config\".";

/// \brief Define flag for showing help message <br>
DEFINE_bool(h, false, help_message);
/// \brief Define parameter for set image file <br>
/// It is a required parameter
DEFINE_string(i, "", image_message);
/// \brief Define parameter for set model file <br>
/// It is a required parameter
DEFINE_string(m, "", model_message);
/// \brief device the target device to infer on <br>
DEFINE_string(d, "CPU", target_device_message);
/// @brief Define parameter for clDNN custom kernels path <br>
/// Default is ./lib
DEFINE_string(c, "", custom_cldnn_message);
/// @brief Absolute path to CPU library with user layers <br>
/// It is an optional parameter
DEFINE_string(l, "", custom_cpu_library_message);
/// @brief Enable plugin messages
DEFINE_bool(p_msg, false, plugin_err_message);
/// @brief Define path to plugin config
DEFINE_string(config, "", config_message);

/**
* \brief This function show a help message
*/
static void showUsage() {
std::cout << std::endl;
std::cout << "object_detection_sample_ssd [OPTION]" << std::endl;
std::cout << "Options:" << std::endl;
std::cout << std::endl;
std::cout << " -h " << help_message << std::endl;
std::cout << " -i \"<path>\" " << image_message << std::endl;
std::cout << " -m \"<path>\" " << model_message << std::endl;
std::cout << " -l \"<absolute_path>\" " << custom_cpu_library_message << std::endl;
std::cout << " Or" << std::endl;
std::cout << " -c \"<absolute_path>\" " << custom_cldnn_message << std::endl;
std::cout << " -d \"<device>\" " << target_device_message << std::endl;
std::cout << " -p_msg " << plugin_err_message << std::endl;
}
IV: Build
Working directory:
/opt/intel/openvino/inference_engine/samples
sh ./build_samples.sh
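Assuming the self_object_detection directory's CMakeLists.txt defines its own target name (e.g. a copy of the stock sample's CMakeLists.txt with TARGET_NAME changed), build_samples.sh should pick it up together with the other samples and place the binary in the build tree used in section V:
ls /root/inference_engine_samples_build/intel64/Release/self_object_detection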
V: Test
Working directory:
/root/inference_engine_samples_build/intel64/Release
Command:
./self_object_detection -m /opt/intel/openvino_2019.2.201/deployment_tools/model_optimizer/./frozen_inference_graph.xml -d CPU -i /home/gsj/dataset/coco_val/val8
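The detections are written back as out_<batch_id>.bmp relative to the working directory (see the end of process() in main.cpp), so after this run the results can be listed with:
ls /root/inference_engine_samples_build/intel64/Release/out_*.bmp
# out_0.bmp ... out_7.bmp, one per input image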
VI: Results
[ INFO ] InferenceEngine:
API version ............ 2.0
Build .................. custom_releases//R2_3044732e25bc7dfbd11a54be72e34d512862b2b3
Description ....... API
Parsing input parameters
[ INFO ] Files were added:
[ INFO ] /home/gsj/dataset/coco_val/val8/.jpg
[ INFO ] /home/gsj/dataset/coco_val/val8/.jpg
[ INFO ] /home/gsj/dataset/coco_val/val8/.jpg
[ INFO ] /home/gsj/dataset/coco_val/val8/.jpg
[ INFO ] /home/gsj/dataset/coco_val/val8/.jpg
[ INFO ] /home/gsj/dataset/coco_val/val8/.jpg
[ INFO ] /home/gsj/dataset/coco_val/val8/.jpg
[ INFO ] /home/gsj/dataset/coco_val/val8/.jpg
[ INFO ] Loading Inference Engine
[ INFO ] Device info:
CPU
MKLDNNPlugin version ......... 2.0
Build ...........
[ INFO ] Loading network files:
/opt/intel/openvino_2019.2.201/deployment_tools/model_optimizer/./frozen_inference_graph.xml
/opt/intel/openvino_2019.2.201/deployment_tools/model_optimizer/./frozen_inference_graph.bin
[ INFO ] Preparing input blobs
[ INFO ] Batch size is 32
[ INFO ] Preparing output blobs
[ INFO ] Loading model to the device
[ INFO ] Create infer request
[ WARNING ] Image is resized from (, ) to (, )
[ WARNING ] Image is resized from (, ) to (, )
[ WARNING ] Image is resized from (, ) to (, )
[ WARNING ] Image is resized from (, ) to (, )
[ WARNING ] Image is resized from (, ) to (, )
[ WARNING ] Image is resized from (, ) to (, )
[ WARNING ] Image is resized from (, ) to (, )
[ WARNING ] Image is resized from (, ) to (, )
[ INFO ] Batch Size is 32
[ WARNING ] Number of images 8 doesn't match batch size 32
[ WARNING ] Number of images to be processed is 8
[ INFO ] Start inference
[ INFO ] Processing output blobs
-- 0.452099
[,] element, prob = 0.851202 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.495117 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.376695 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.337178 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.668834 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.558071 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.432652 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.313619 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.488229 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.885867 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.305516 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.332538 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.992781 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.988277 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.981469 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.94848 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.428106 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.978832 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.333557 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.985633 (-,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.882272 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.874527 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.850498 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.844409 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.787552 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.748578 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.735457 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.712015 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.689215 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.620327 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.614535 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.609089 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.604894 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.554959 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.549844 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.404613 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.366167 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.320608 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.996094 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.9818 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.517957 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.302339 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.98227 (,)-(,) batch id : WILL BE PRINTED!
[,] element, prob = 0.924668 (,)-(,) batch id : WILL BE PRINTED!
[ INFO ] Image out_0.bmp created!
[ INFO ] Image out_1.bmp created!
[ INFO ] Image out_2.bmp created!
[ INFO ] Image out_3.bmp created!
[ INFO ] Image out_4.bmp created!
[ INFO ] Image out_5.bmp created!
[ INFO ] Image out_6.bmp created!
[ INFO ] Image out_7.bmp created!
[ INFO ] Execution successful
[ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool