1. pytorch2onnx
python tools/deployment/pytorch2onnx.py configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py work_dirs/faster_rcnn_r50_fpn_2x_coco/epoch_24.pth
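
By default the export script writes the model to ./tmp.onnx, which is what the verification script below loads. Depending on your MMDetection version you can also pass the output path and input shape explicitly (flag names assumed from MMDetection 2.x; check python tools/deployment/pytorch2onnx.py -h for your version):

python tools/deployment/pytorch2onnx.py configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py work_dirs/faster_rcnn_r50_fpn_2x_coco/epoch_24.pth --output-file tmp.onnx --shape 800 1333

The exported model can then be checked and run with ONNX Runtime: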

import onnx
import numpy as np
import onnxruntime
import cv2
onnx_file = './tmp.onnx'
onnx_model = onnx.load(onnx_file)
onnx.checker.check_model(onnx_model)
print('The model is checked!')
# Label names used when drawing results; replace with the class names of your dataset.
coco_names = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14']
img = cv2.imread("D:\\workshop\\temp\\16.png")
src = np.copy(img)
# Resize to the fixed input size used at export time: width=1333, height=800.
img = cv2.resize(img, (1333, 800))
img = img.astype('float32') / 255.0
h, w, c = img.shape  # (800, 1333, 3)
# HWC -> CHW, then add a batch dimension to get an NCHW input of shape (1, 3, 800, 1333).
img = img.transpose((2, 0, 1))
x1 = np.expand_dims(img, axis=0).astype('float32')
# print("x:",x)
# predict by ONNX Runtime
sess = onnxruntime.InferenceSession(onnx_file)
inputs = {sess.get_inputs()[0].name: x1}
outs = sess.run(None, inputs)
# outs[0]: detections of shape (num_dets, 5) = [x1, y1, x2, y2, score];
# outs[1]: class labels (newer exports may add a leading batch dimension).
boxes = outs[0][:, 0:4]   # boxes: [x1, y1, x2, y2]
labels = outs[1]          # class labels
scores = outs[0][:, 4]    # confidence scores
print(boxes.shape, boxes.dtype, labels.shape, labels.dtype)
# Boxes are predicted in the resized (1333x800) space; scale them back to the original image.
sx = src.shape[1] / 1333.0
sy = src.shape[0] / 800.0
boxes[:, [0, 2]] *= sx
boxes[:, [1, 3]] *= sy
# Draw detections with a confidence score above 0.3 on the original image.
for index, box in enumerate(boxes):
    if scores[index] > 0.3:
        cv2.rectangle(src, (np.int32(box[0]), np.int32(box[1])),
                      (np.int32(box[2]), np.int32(box[3])), (0, 255, 255), 2, 8)
        label_id = labels[index]
        label_txt = coco_names[label_id]
        cv2.putText(src, label_txt, (np.int32(box[0]), np.int32(box[1])),
                    cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 255), 1)
cv2.imshow("Faster-RCNN Detection Demo", src)
cv2.waitKey(0)
print("output: ", outs)
print("Exported model has been predicted by ONNXRuntime!")
2. onnx2tensorrt
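A typical invocation of MMDetection's TensorRT conversion script looks like the following (argument order and flag name assumed from MMDetection 2.x; check python tools/deployment/onnx2tensorrt.py -h for your version):

python tools/deployment/onnx2tensorrt.py configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py tmp.onnx --trt-file tmp.trt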