I have converted a YOLOv3 model to ONNX and from ONNX to a TensorRT engine.
Now I am trying to run inference on each image received over Socket.IO. Here is my code:
import os
import time
import argparse
import numpy as np
import cv2
import pycuda.autoinit # This is needed for initializing CUDA driver
import socketio
import base64
from utils.yolo_classes import get_cls_dict
from utils.camera import add_camera_args, Camera
from utils.display import open_window, set_display, show_fps
from utils.visualization import BBoxVisualization
from utils.yolo_with_plugins import TrtYOLO
global conf_th
conf_th = 0.3
'''
Loading Model
'''
global trt_yolo
trt_yolo = TrtYOLO("yolov3-custom-416", (416, 416), 3)
print("trt_yolo ==>", trt_yolo)
WINDOW_NAME = 'TrtYOLODemo'
inputShape = (300, 300)
'''
Shinobi Plugin Variables
'''
shinobiPLuginName = "NoMask"
shinobiPluginKey = "NoMask123123"
shinobiHost = 'http://192.168.0.109:9090'
'''
Socket.IO Connection with Reconnection
'''
sio = socketio.Client(reconnection=True, reconnection_delay=1, ssl_verify=False)
sio.connect(shinobiHost, transports='websocket')
sio.emit('ocv',
         {'f': 'init', 'plug': shinobiPLuginName, 'type': 'detector', 'connectionType': 'websocket', 'pluginKey': shinobiPluginKey})
# Socket.IO connect event (reconnection is handled by the client itself)
@sio.event
def connect():
    print('connection established')
    sio.emit('ocv',
             {'f': 'init', 'plug': shinobiPLuginName, 'type': 'detector', 'connectionType': 'websocket', 'pluginKey': shinobiPluginKey})
# Socket.IO reconnection event
# Note: python-socketio re-invokes connect() after a successful reconnection,
# so this handler may never fire on its own.
@sio.event
def reconnect():
    print("Reconnection established")
    sio.emit('ocv',
             {'f': 'init', 'plug': shinobiPLuginName, 'type': 'detector', 'connectionType': 'websocket', 'pluginKey': shinobiPluginKey})
# Socket.IO disconnect event
@sio.event
def disconnect():
    print('disconnected from server')
def yolo_detection(img_np, trt_yolo, recvdImg, height, width, shinobiId, shinobiKe):
    frame = img_np
    print("trt_yolo_YOLODETECTION", trt_yolo)
    (h, w) = frame.shape[:2]
    # Run TensorRT YOLO inference on the frame
    boxes, confs, clss = trt_yolo.detect(frame, conf_th)
    print("boxes", boxes)
    print("confs", confs)
    print("clss", clss)
# 'f' event: each frame from Shinobi is received in this function
@sio.event
def f(data):
    shinobiId = data.get("id")
    shinobiKe = data.get("ke")
    recvdImg = data.get("frame")
    # Decode the received JPEG bytes into a BGR image
    nparr = np.frombuffer(recvdImg, np.uint8)
    print("trt_yolo ON F!! ==>", trt_yolo)
    img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    #img_np = cv2.resize(img_np, inputShape, interpolation=cv2.INTER_AREA)
    #cv2.imwrite('recvdImg.jpg', img_np)
    yolo_detection(img_np, trt_yolo, recvdImg, img_np.shape[0], img_np.shape[1], shinobiId, shinobiKe)
# Wait for Socket.IO events
sio.wait()
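Note that yolo_detection above only prints the results; to actually notify Shinobi, the detections would have to be emitted back on the same socket. The sketch below is only an assumption: send_detections is a hypothetical helper, the payload fields ('f': 'trigger', 'details', 'matrices', etc.) are modelled on how Shinobi detector plugins typically report events and should be verified against the Shinobi plugin documentation, and boxes is assumed to be a list of (x_min, y_min, x_max, y_max) values as returned by TrtYOLO.detect.

def send_detections(boxes, confs, clss, shinobiId, shinobiKe, img_h, img_w):
    # Hypothetical helper -- payload layout assumed from Shinobi's detector
    # plugin conventions; verify the exact field names against the Shinobi docs.
    matrices = []
    for (x_min, y_min, x_max, y_max), conf, cls in zip(boxes, confs, clss):
        matrices.append({
            'x': int(x_min),
            'y': int(y_min),
            'width': int(x_max - x_min),
            'height': int(y_max - y_min),
            'tag': str(int(cls)),          # class index; map to a label name if desired
            'confidence': float(conf),
        })
    if matrices:
        sio.emit('ocv', {
            'f': 'trigger',
            'id': shinobiId,
            'ke': shinobiKe,
            'details': {
                'plug': shinobiPLuginName,
                'name': shinobiPLuginName,
                'reason': 'object',
                'matrices': matrices,
                'imgHeight': img_h,
                'imgWidth': img_w,
            },
        })

It could then be called at the end of yolo_detection, e.g. send_detections(boxes, confs, clss, shinobiId, shinobiKe, h, w).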