目标:在 Windows 上运行 OpenVINO 2021.4 版
实现路径:
cmake
命令阶段就爆出一个 This Intel TBB package is intended to be used only in the project with MSVC
的错。
准备工作
本文剩余内容主要是:
假设 OpenVINO 的安装路径是 C:\Program Files (x86)\Intel\openvino_2021.4.752\
需要设置的配置主要包括:
在环境变量 PATH
中添加以下内容
C:\Program Files (x86)\Intel\openvino_2021.4.752\opencv\bin
C:\Program Files (x86)\Intel\openvino_2021.4.752\inference_engine\bin\intel64\Debug
C:\Program Files (x86)\Intel\openvino_2021.4.752\inference_engine\bin\intel64\Release
在 项目属性 -> VC++ 目录 -> 包含目录
(或 项目属性 -> C/C++ -> 常规 -> 附加包含目录)
中添加以下内容:
# Debug 和 Release 模式都相同
C:\Program Files (x86)\Intel\openvino_2021.4.752\deployment_tools\inference_engine\include
C:\Program Files (x86)\Intel\openvino_2021.4.752\opencv\include
在 项目属性 -> VC++ 目录 -> 库目录
(或 项目属性 -> 链接器 -> 常规 -> 附加库目录)
中添加以下内容:
# Debug 模式
C:\Program Files (x86)\Intel\openvino_2021.4.752\opencv\lib
C:\Program Files (x86)\Intel\openvino_2021.4.752\deployment_tools\inference_engine\lib\intel64\Debug
# Release 模式
C:\Program Files (x86)\Intel\openvino_2021.4.752\opencv\lib
C:\Program Files (x86)\Intel\openvino_2021.4.752\deployment_tools\inference_engine\lib\intel64\Release
xxxd.lib
而对应的 Release 版都是 xxx.lib
/path/to/*.lib
的形式获取,但我没试过),所以写了个脚本打印所有 lib 文件名称,方便后续复制使用。
# Release 模式
opencv_calib3d453.lib
opencv_core453.lib
opencv_dnn453.lib
opencv_features2d453.lib
opencv_flann453.lib
opencv_gapi453.lib
opencv_highgui453.lib
opencv_imgcodecs453.lib
opencv_imgproc453.lib
opencv_ml453.lib
opencv_objdetect453.lib
opencv_photo453.lib
opencv_stitching453.lib
opencv_video453.lib
opencv_videoio453.lib
inference_engine.lib
inference_engine_c_api.lib
inference_engine_transformations.lib
# Debug 模式
opencv_calib3d453d.lib
opencv_core453d.lib
opencv_dnn453d.lib
opencv_features2d453d.lib
opencv_flann453d.lib
opencv_gapi453d.lib
opencv_highgui453d.lib
opencv_imgcodecs453d.lib
opencv_imgproc453d.lib
opencv_ml453d.lib
opencv_objdetect453d.lib
opencv_photo453d.lib
opencv_stitching453d.lib
opencv_video453d.lib
opencv_videoio453d.lib
inference_engined.lib
inference_engine_c_apid.lib
inference_engine_transformationsd.lib
# 使用方式: python print_lib.py --dir /path/to/xxx.lib --debug --suffix ".lib"
import os
import argparse
def parse_args():
    """Build and parse the command-line options for the lib-listing helper."""
    cli = argparse.ArgumentParser()
    # Directory that will be scanned for library files.
    cli.add_argument("--dir", default=".")
    # When set, list the debug variants (files ending in "d" + suffix).
    cli.add_argument("--debug", action="store_true")
    # File extension that identifies a library file.
    cli.add_argument("--suffix", default=".lib")
    return cli.parse_args()
def main(args):
    """Print the names of library files under ``args.dir``.

    With ``args.debug`` set, only files ending in ``"d" + args.suffix``
    (e.g. ``opencv_core453d.lib``) are printed; otherwise only the
    release variants (ending in ``args.suffix`` but NOT in the debug
    suffix) are printed.

    Raises:
        ValueError: if ``args.dir`` does not exist.
    """
    if not os.path.exists(args.dir):
        raise ValueError(f"Path {args.dir} doesn't exist")
    suffix = args.suffix
    debug_suffix = "d" + suffix
    if args.debug:
        lines = [f for f in os.listdir(args.dir) if f.endswith(debug_suffix)]
    else:
        lines = [f for f in os.listdir(args.dir)
                 if f.endswith(suffix) and not f.endswith(debug_suffix)]
    # os.listdir order is platform-dependent; sort for deterministic output.
    for line in sorted(lines):
        print(line)

if __name__ == '__main__':
    main(parse_args())
项目属性 -> 调试 -> 环境
中添加以下内容:
# Release 模式
PATH=C:\Program Files (x86)\Intel\openvino_2021.4.752\deployment_tools\inference_engine\bin\intel64\Release;C:\Program Files (x86)\Intel\openvino_2021.4.752\deployment_tools\ngraph\lib;C:\Program Files (x86)\Intel\openvino_2021.4.752\deployment_tools\inference_engine\external\tbb\bin;%PATH%
# Debug 模式
PATH=C:\Program Files (x86)\Intel\openvino_2021.4.752\deployment_tools\inference_engine\bin\intel64\Debug;C:\Program Files (x86)\Intel\openvino_2021.4.752\deployment_tools\ngraph\lib;C:\Program Files (x86)\Intel\openvino_2021.4.752\deployment_tools\inference_engine\external\tbb\bin;%PATH%
// --- Minimal OpenVINO 2021.4 Inference Engine usage sketch ---
// NOTE(review): this is a fragment; `model_path` (.xml) and `bin_path`
// (.bin) must be defined by the surrounding code.

// Create the Inference Engine core object.
InferenceEngine::Core ie;
// Load the model topology and its weights.
auto network = ie.ReadNetwork(model_path, bin_path);
// Query input/output descriptors; preprocessing is configured per input.
InferenceEngine::InputsDataMap input_info(network.getInputsInfo());
InferenceEngine::OutputsDataMap output_info(network.getOutputsInfo());
for (auto& item : input_info) {
    // Bind by reference to avoid copying the shared pointer held in the map.
    auto& input_data = item.second;
    // Configure as needed for the model.
    input_data->setPrecision(InferenceEngine::Precision::FP32);
    input_data->setLayout(InferenceEngine::Layout::NCHW);
    // Fully qualified for consistency with the rest of the snippet
    // (RESIZE_BILINEAR lives in namespace InferenceEngine).
    input_data->getPreProcess().setResizeAlgorithm(InferenceEngine::RESIZE_BILINEAR);
    input_data->getPreProcess().setColorFormat(InferenceEngine::ColorFormat::RGB);
}
for (auto& item : output_info) {
    auto& output_data = item.second;
    output_data->setPrecision(InferenceEngine::Precision::FP32);
}
// Prepare for inference.
// Note: both objects below are reusable — create them once and keep them.
auto executable_network = ie.LoadNetwork(network, "CPU");
auto infer_request = executable_network.CreateInferRequest();
// Copy the input data into the memory backing each input blob.
for (auto& item : input_info) {
    const auto& input_name = item.first;
    auto input = infer_request.GetBlob(input_name);
    float* data = static_cast<float*>(input->buffer());
    // copy data to data ...
}
// Run inference (synchronous).
infer_request.Infer();
// Read back the inference results.
for (auto& item : output_info) {
    const auto& output_name = item.first;
    auto output = infer_request.GetBlob(output_name);
    const float* output_blob_data =
        static_cast<InferenceEngine::PrecisionTrait<
            InferenceEngine::Precision::FP32>::value_type*>(
            output->buffer());
    // do something with output_blob_data
}