Introduction
This article shows how to run YOLOv8 Oriented Bounding Box (OBB) inference with ONNX. The example is written in Python and covers image processing and object detection: loading the model, preprocessing the input image, running inference, and post-processing the results.
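Before the full listing, here is a minimal usage sketch of how the ONNXInfer class defined below is constructed and given an image. The model path, class-name list, and image file are placeholder values for illustration only; the preprocessing, inference, and post-processing calls themselves appear in the complete code that follows.

import cv2

# Placeholder model path and class list -- substitute your own exported OBB model and labels.
CLASS_NAMES = ["plane", "ship", "storage-tank"]
detector = ONNXInfer(
    onnx_model="yolov8n-obb.onnx",
    class_names=CLASS_NAMES,
    device="auto",       # picks CUDA when ONNX Runtime reports a GPU, otherwise CPU
    conf_thres=0.5,
    nms_thres=0.4,
)
image = cv2.imread("demo.jpg")  # load the test image with OpenCV
# Preprocessing, inference, and post-processing are shown in the full listing below.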
Code
Below is the complete code for YOLOv8 OBB inference:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# @FileName : YOLOv8_OBB.py
# @Time : 2024-07-25 17:33:48
# @Author : XuMing
# @Email : [email protected]
# @description : YOLOv8 Oriented Bounding Box Inference using ONNX
"""
import cv2
import math
import random
import numpy as np
import onnxruntime as ort
from loguru import logger
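
# Lightweight container for a single rotated detection: the rotated box,
# its confidence score, and the predicted class index.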
class RotatedBOX:
def __init__(self, box, score, class_index):
self.box = box
self.score = score
self.class_index = class_index
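
# Wraps an ONNX Runtime session for YOLOv8 OBB inference: selects the
# execution provider, configures the session, and stores the confidence
# and NMS thresholds.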
class ONNXInfer:
def __init__(self, onnx_model, class_names, device='auto', conf_thres=0.5, nms_thres=0.4) -> None:
self.onnx_model = onnx_model
self.class_names = class_names
self.conf_thres = conf_thres
self.nms_thres = nms_thres
self.device = self._select_device(device)
logger.info(f"Loading model on {
self.device}...")
self.session_model = ort.InferenceSession(
self.onnx_model,
providers=self.device,
sess_options=self._get_session_options()
)
def _select_device(self, device):
"""
Select the appropriate device.
:param device: 'auto', 'cuda', or 'cpu'.
:return: List of providers.
"""
if device == 'cuda' or (device == 'auto' and ort.get_device() == 'GPU'):
return ['CUDAExecutionProvider', 'CPUExecutionProvider']
return ['CPUExecutionProvider']
def _get_session_options(self):
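        """Build session options: extended graph optimizations and 4 intra-op threads."""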
sess_options = ort.SessionOptions()
sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
sess_options.intra_op_num_threads = 4
return sess_options
def preprocess