Hello,
Part of my Python script that shows images from an Intel RealSense D435 camera in real time is:
from ultralytics import YOLO
import cv2

model = YOLO("yolov10x.pt")

def predict(chosen_model, img, classes=[], conf=0.5):
    if classes:
        results = chosen_model.predict(img, classes=classes, conf=conf)
    else:
        results = chosen_model.predict(img, conf=conf)
    return results

def predict_and_detect(chosen_model, img, classes=[], conf=0.5, rectangle_thickness=3, text_thickness=3):
    results = predict(chosen_model, img, classes, conf=conf)
    # Draw a bounding box and class label for every detection
    for result in results:
        for box in result.boxes:
            cv2.rectangle(img, (int(box.xyxy[0][0]), int(box.xyxy[0][1])),
                          (int(box.xyxy[0][2]), int(box.xyxy[0][3])), (49, 49, 255), rectangle_thickness)
            cv2.putText(img, f"{result.names[int(box.cls[0])]}",
                        (int(box.xyxy[0][0]), int(box.xyxy[0][1]) - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (49, 49, 255), text_thickness)
    return img, results

# color_image is the current BGR frame from the camera stream
result_img, _ = predict_and_detect(model, color_image, classes=[0, 7], conf=0.5)
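For context, color_image comes from a standard pyrealsense2 capture loop, roughly like the simplified sketch below (the 640x480 @ 30 FPS stream settings and the imshow window are just illustrative, not my exact code):

import numpy as np
import pyrealsense2 as rs

# Start a color stream from the D435
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
pipeline.start(config)

try:
    while True:
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        if not color_frame:
            continue
        # Convert the RealSense frame to a NumPy BGR image for YOLO/OpenCV
        color_image = np.asanyarray(color_frame.get_data())
        result_img, _ = predict_and_detect(model, color_image, classes=[0, 7], conf=0.5)
        cv2.imshow("RealSense", result_img)
        if cv2.waitKey(1) == ord("q"):
            break
finally:
    pipeline.stop()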
Unfortunately, the call to predict_and_detect is not real-time on the live video stream from the Intel RealSense D435 camera when I run it on a Jetson device: the output lags and gets stuck. So I am wondering what ways there are to make it real-time on the Jetson. The same code runs in real time on my laptop with a 3080 GPU.
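One idea I am considering, if I understand the Ultralytics export docs correctly, is converting the model to a TensorRT engine on the Jetson and running inference from that instead of the .pt weights. A rough, untested sketch of what I mean (the half-precision and image-size values are just guesses on my part):

from ultralytics import YOLO

# One-time export, run on the Jetson itself since TensorRT engines are device-specific
model = YOLO("yolov10x.pt")
model.export(format="engine", half=True, imgsz=640)  # writes yolov10x.engine

# Load the exported engine and use it with the same predict_and_detect() as above
trt_model = YOLO("yolov10x.engine")
result_img, _ = predict_and_detect(trt_model, color_image, classes=[0, 7], conf=0.5)

But I am not sure whether that alone would be enough, or whether I would also need to lower the input resolution, switch to a smaller YOLOv10 variant, or skip frames, so any pointers are appreciated.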