Description
I have converted my ONNX model to a TensorRT engine using the following command:
/usr/src/tensorrt/bin/trtexec --onnx=/home/isl/fall_keras_model/keras_fall_model_onnx.onnx --saveEngine=/home/isl/success_engine.engine
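Before wiring the engine into the script, it helps to confirm which input/output bindings it exposes, since the Keras-to-ONNX export fixes the expected shape and dtype. A minimal inspection sketch using the TensorRT 8.0 Python API (the engine path is the one produced by the command above):

import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
with open("/home/isl/success_engine.engine", "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
    engine = runtime.deserialize_cuda_engine(f.read())

# Print each binding's name, shape, dtype, and direction so preprocessing can match them
for i in range(engine.num_bindings):
    print(engine.get_binding_name(i),
          engine.get_binding_shape(i),
          engine.get_binding_dtype(i),
          "input" if engine.binding_is_input(i) else "output")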
I also have the following Python script, which currently runs the Keras model:
import cv2
from cvzone.HandTrackingModule import HandDetector
from cvzone.ClassificationModule import Classifier
import numpy as np
import math

final_output = ""
letters = []
count_frames = 20
cap = cv2.VideoCapture(0)
detector = HandDetector(maxHands=1)
classifier = Classifier("fall_keras_model.h5", "fall_labels.txt")
offset = 50
imgSize = 300
labels = ["A", "B", "back", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
          "N", "O", "P", "Q", "R", "S", "space", "T", "U", "V", "W", "X", "Y", "Z"]  # back, space, j, z

while True:
    success, img = cap.read()
    hands = detector.findHands(img, draw=False)
    # Binarize the frame so the hand silhouette stands out from the background
    filtered = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    filtered = cv2.GaussianBlur(filtered, (5, 5), 2)
    filtered = cv2.adaptiveThreshold(filtered, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                     cv2.THRESH_BINARY_INV, 11, 2)
    ret, filtered = cv2.threshold(filtered, 170, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    cv2.imshow("Original", img)
    if hands:
        hand = hands[0]
        x, y, w, h = hand['bbox']
        imgWhite = np.ones((imgSize, imgSize), np.uint8) * 255
        imgCrop = filtered[y-offset : y+h+offset, x-offset : x+w+offset]
        aspectRatio = h / w
        try:
            # Letterbox the crop onto a square white canvas, preserving aspect ratio
            if aspectRatio > 1:
                k = imgSize / h
                wCal = math.ceil(k * w)
                imgResize = cv2.resize(imgCrop, (wCal, imgSize))
                wGap = math.ceil((imgSize - wCal) / 2)
                imgWhite[:, wGap:wCal + wGap] = imgResize
            else:
                k = imgSize / w
                hCal = math.ceil(k * h)
                imgResize = cv2.resize(imgCrop, (imgSize, hCal))
                hGap = math.ceil((imgSize - hCal) / 2)
                imgWhite[hGap:hCal + hGap, :] = imgResize
            gray2rgb = cv2.cvtColor(imgWhite, cv2.COLOR_GRAY2RGB)
            prediction, index = classifier.getPrediction(gray2rgb)
            count_frames -= 1
            if count_frames == 0:
                # Every 20 frames, commit the most frequent prediction
                count_frames = 20
                lett = max(letters, key=letters.count)
                letters.clear()
                if lett == "space":
                    final_output += " "
                elif lett == "back":
                    final_output = final_output[:-1]
                else:
                    final_output += lett
            else:
                letters.append(labels[index])
            if x - offset > 0 and x + offset < img.shape[1] and y - offset > 0 and y + offset < img.shape[0]:
                imgWhite = cv2.putText(imgWhite, final_output, (50, 50), cv2.FONT_HERSHEY_SIMPLEX,
                                       1, (255, 0, 255), 2, cv2.LINE_AA)
                cv2.imshow("Final", imgWhite)
        except Exception:
            print("ERROR: Hand out of frame")
    key = cv2.waitKey(1)
    if key == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
ISSUE: I want to run the converted TensorRT .engine file and pass my input image to it to get the prediction. How should I load the engine in the above code and run inference with it?
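For reference, below is a minimal sketch of how the engine could be deserialized and run using TensorRT's Python bindings together with PyCUDA (both ship with JetPack on the Jetson Nano). It follows the buffer-allocation pattern from the TensorRT samples and uses the binding-index API current in TensorRT 8.0; it assumes the engine was built with a static input shape, and the helper name trt_predict is purely illustrative:

import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit  # creates a CUDA context on import
import numpy as np

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

# Deserialize the engine produced by trtexec
with open("/home/isl/success_engine.engine", "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
    engine = runtime.deserialize_cuda_engine(f.read())
context = engine.create_execution_context()

# Allocate one pinned host buffer and one device buffer per binding
inputs, outputs, bindings = [], [], []
stream = cuda.Stream()
for binding in engine:
    size = trt.volume(engine.get_binding_shape(binding))
    dtype = trt.nptype(engine.get_binding_dtype(binding))
    host_mem = cuda.pagelocked_empty(size, dtype)
    device_mem = cuda.mem_alloc(host_mem.nbytes)
    bindings.append(int(device_mem))
    if engine.binding_is_input(binding):
        inputs.append((host_mem, device_mem))
    else:
        outputs.append((host_mem, device_mem))

def trt_predict(image):
    """Run one preprocessed image through the engine; returns the raw score vector."""
    np.copyto(inputs[0][0], image.ravel())  # host buffer <- flattened image
    cuda.memcpy_htod_async(inputs[0][1], inputs[0][0], stream)
    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    cuda.memcpy_dtoh_async(outputs[0][0], outputs[0][1], stream)
    stream.synchronize()
    return outputs[0][0]

In the main loop, the Keras call could then be swapped for something like the lines below. The 224x224 size and the /255.0 scaling are assumptions: they must match whatever shape and normalization the Keras model was trained with (check against the binding shapes printed earlier):

inp = cv2.resize(gray2rgb, (224, 224)).astype(np.float32) / 255.0  # assumed input size/normalization
scores = trt_predict(inp[np.newaxis, ...])  # add the batch dimension
index = int(np.argmax(scores))

Once the engine handles inference, the Classifier/.h5 dependency is no longer needed, and the existing labels list can serve as the index-to-letter mapping.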
Environment
TensorRT Version: 8.0.6.1
GPU Type: Jetson Nano
Nvidia Driver Version:
CUDA Version:
CUDNN Version:
Operating System + Version: Ubuntu 18.04+
Python Version (if applicable): 3.6.9
TensorFlow Version (if applicable):
PyTorch Version (if applicable):
Baremetal or Container (if container which image + tag):
Relevant Files
Please attach or include links to any models, data, files, or scripts necessary to reproduce your issue. (Github repo, Google Drive, Dropbox, etc.)
Steps To Reproduce
Please include:
- Exact steps/commands to build your repro
- Exact steps/commands to run your repro
- Full traceback of errors encountered