自己开始用paddle时就是2.0版本了,所以不关心之前的版本。
进入 paddle 环境后,查看已安装的包:
conda list
或者
pip list
如果没有安装onnx则先安装onnx,onnx版本匹配是一个令人头疼的问题。
暂且不考虑版本问题直接使用下面的命令安装。
pip install onnx
pip install paddle2onnx
导出onnx模型的大致步骤如下:
import os
import time
import paddle
# Import the model definition from your model code
# Instantiate the model
# Load the pretrained model parameters
# Switch the model to evaluation mode
# Define the input data (an InputSpec describing shape/dtype)
# Export the ONNX model
paddle.onnx.export(model, [path to the save onnx model], input_spec=[input_spec], opset_version=[opset version])
完整的 ONNX 导出脚本(以 PaddleOCR 识别模型为例)如下:
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))
import argparse
import paddle
from paddle.jit import to_static
from ppocr.modeling.architectures import build_model
from ppocr.postprocess import build_post_process
from ppocr.utils.save_load import init_model
from ppocr.utils.logging import get_logger
from tools.program import load_config, merge_config, ArgsParser
def main():
    """Export a PaddleOCR recognition model to ONNX via paddle.onnx.export."""
    flags = ArgsParser().parse_args()
    config = load_config(flags.config)
    merge_config(flags.opt)
    logger = get_logger()

    # Build the post-process object first: for recognition algorithms its
    # character set fixes the head's number of output channels.
    post_process_class = build_post_process(config['PostProcess'],
                                            config['Global'])
    if hasattr(post_process_class, 'character'):
        config['Architecture']["Head"]['out_channels'] = len(
            getattr(post_process_class, 'character'))

    # Build the network, load pretrained weights, switch to eval mode.
    model = build_model(config['Architecture'])
    init_model(config, model, logger)
    model.eval()

    # Fixed input spec: a single 3x32x320 float32 image (NCHW).
    input_spec = paddle.static.InputSpec(
        shape=[1, 3, 32, 320], dtype='float32', name='data')

    # Export the model to ONNX under the path prefix "ocr_rec".
    paddle.onnx.export(model, "ocr_rec", input_spec=[input_spec],
                       opset_version=10)


if __name__ == '__main__':
    main()
onnx推理
import onnx
import numpy as np
import onnxruntime
import cv2

model_path = './ocr_rec.onnx'

# Structural validity check of the exported graph before running it.
onnx.checker.check_model(onnx.load(model_path))
print('The model is checked!')

# A random tensor with the model's expected input shape, printed for reference.
dummy = np.random.random((1, 3, 32, 320)).astype('float32')
print("x:", dummy)

# Read the test image and convert it into a normalized 1x3x32x320 float batch:
# resize to 320x32, BGR -> RGB, HWC -> CHW, add batch dim, scale to [0, 1].
bgr = cv2.imread("./00000.jpg")
rgb = cv2.cvtColor(cv2.resize(bgr, (320, 32)), cv2.COLOR_BGR2RGB)
batch = np.expand_dims(np.transpose(rgb, (2, 0, 1)).astype(np.float32), axis=0)
batch /= 255.0

# predict by ONNX Runtime
session = onnxruntime.InferenceSession(model_path)
feed = {session.get_inputs()[0].name: batch}
outputs = session.run(None, feed)
print("Exported model has been predicted by ONNXRuntime!")
具体代码还不完善,后面会修改。
参考:PaddleOCR转ONNX推理
参考:PaddleOCR模型转ONNX__PaddlePaddle模型导出ONNX协议
参考:Paddle2.0:ONNX模型的导出和部署
参考:其余参考链接待补充。