# Yolov5WhiteLineDetection / openvino / attempt_module.py
# Original hosting-page header: committed by sato on 1 Mar 2022, 2 KB,
# commit message "最初のコミット" ("first commit").
import sys
sys.path.append("../")

import time
from openvino.inference_engine import IECore
import numpy as np
import cv2
from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
                           increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator
import torch
from tqdm import tqdm

# List every inference device OpenVINO can see on this machine,
# printing its short name alongside its full descriptive name.
ie = IECore()
for dev in ie.available_devices:
    full_name = ie.get_metric(device_name=dev, metric_name="FULL_DEVICE_NAME")
    print(f"{dev}: {full_name}")


# Load the INT8-quantized YOLOv5s IR model (.xml topology + .bin weights).
# Reuses the IECore instance created above instead of constructing a second one.
model_xml = r"C:\Users\user\PycharmProjects\yolov5_v6.0\openvino\yolov5s_640x640_opt_int8.xml"
model_weights = r"C:\Users\user\PycharmProjects\yolov5_v6.0\openvino\yolov5s_640x640_opt_int8.bin"
net = ie.read_network(model=model_xml, weights=model_weights)

# The network has a single input; grab its name and report its properties.
input_layer = next(iter(net.input_info))
print(f"input layout: {net.input_info[input_layer].layout}")
print(f"input precision: {net.input_info[input_layer].precision}")
print(f"input shape: {net.input_info[input_layer].tensor_desc.dims}")

# Optional CPU tuning knobs; left empty to use OpenVINO defaults.
# config = {"EXCLUSIVE_ASYNC_REQUESTS": "YES",
#           "CPU_THREADS_NUM": "8",
#           "CPU_THROUGHPUT_STREAMS": "CPU_THROUGHPUT_AUTO"}
config = {}

# Compile the network for the CPU plugin. `input_layer` was already
# resolved above, so it is not recomputed here.
exec_net = ie.load_network(network=net, device_name="CPU", config=config)

image_filename = r"D:\Deep_Learning\yolov5\whitelines\train\images\705.png"
image = cv2.imread(image_filename)
if image is None:
    # cv2.imread silently returns None on failure; fail loudly instead of
    # crashing later with an opaque error in resize/transpose.
    raise FileNotFoundError(f"could not read image: {image_filename}")

# N,C,H,W = batch size, number of channels, height, width
N, C, H, W = net.input_info[input_layer].tensor_desc.dims
# OpenCV resize expects the destination size as (width, height)
resized_image = cv2.resize(src=image, dsize=(W, H))

# HWC (BGR, uint8) -> NCHW float32, scaled to [0, 1]
input_data = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0).astype(np.float32)
input_data /= 255

# Run one synchronous inference pass and report its wall-clock duration.
start = time.time()
# for i in tqdm(range(1000)):
result = torch.tensor(exec_net.infer({input_layer: input_data})["output"])
elapsed = time.time() - start
print(elapsed)

# NMS: conf_thres=0.01, iou_thres=0.45, keep only class 0, at most 10 boxes.
pred = non_max_suppression(result, 0.01, 0.45, 0, False, max_det=10)
for det in pred:
    annotator = Annotator(image, line_width=3, example=str(["white_line"]))
    if len(det):
        # Rescale boxes from the network input size back to the original image size
        det[:, :4] = scale_coords(input_data.shape[2:], det[:, :4], image.shape).round()

        # Single-class model: every detection is labelled "white_line".
        for *xyxy, conf, cls in reversed(det):
            annotator.box_label(xyxy, "white_line")

cv2.imshow("detect", image)
cv2.waitKey()
cv2.destroyAllWindows()