Hi,
I developed an image segmentation program on Jetson Nano using jetson inference library. It worked well without any issues on the Nano.
import jetson.inference
import jetson.utils
import ctypes
import sys
import cv2
from imutils.video import VideoStream
import imutils
import time
import os
import numpy as np
# Default segmentation parameters. These module-level names are read by
# seg_network()/mem_alloc()/segnet_inference() and may be overridden via
# seg_params(). NOTE: the original code had a module-level `global`
# statement here, which is a no-op outside a function and was removed.
network = 'fcn-resnet18-cityscapes-512x256'  # pretrained segNet model name
width = 512   # Desired output width (pixels)
height = 256  # Desired output height (pixels)
def seg_params(net='fcn-resnet18-cityscapes-512x256', wid=512, ht=256):
    """Store the segmentation configuration in module-level globals.

    Args:
        net: name of the pretrained segNet model to load.
        wid: desired output width in pixels.
        ht: desired output height in pixels.
    """
    global network, width, height
    network, width, height = net, wid, ht
    print('Done setting the parameters...')
def seg_network():
    """Instantiate and return the pretrained segmentation network.

    The model name is taken from the module-level `network` setting
    (see seg_params()).
    """
    segnet = jetson.inference.segNet(network)
    print('Finished loading the pretrained network...')
    return segnet
def mem_alloc():
    """Allocate CPU/GPU shared (mapped) buffers for the overlay and mask.

    Each buffer holds a width x height image with 4 float32 channels
    (RGBA), matching what segnet_inference() writes into them.
    """
    global img_overlay1, img_mask
    # Same size for both: W * H * 4 channels * sizeof(float)
    nbytes = width * height * 4 * ctypes.sizeof(ctypes.c_float)
    img_overlay1 = jetson.utils.cudaAllocMapped(nbytes)
    img_mask = jetson.utils.cudaAllocMapped(nbytes)
def segnet_inference(net, frame):
    """Run semantic segmentation on a single BGR frame.

    Args:
        net: a loaded jetson.inference.segNet instance (see seg_network()).
        frame: BGR uint8 image (any size; resized to the configured
            width x height before inference).

    Returns:
        (out_frame, out_mask): the class overlay and the class mask, both
        as BGR uint8 numpy arrays of shape (height, width, 3).
    """
    frame = cv2.resize(frame, (width, height))
    # jetson-inference expects RGBA input; one conversion replaces the
    # original BGR->RGB followed by RGB->RGBA.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
    # Converting to a jetson CUDA capsule for optimized performance
    cuda_frame = jetson.utils.cudaFromNumpy(frame)
    # Running the inference model on the capsule
    net.Process(cuda_frame, width, height)
    net.SetOverlayAlpha(50.0)
    net.Overlay(img_overlay1, width, height, filter_mode='linear')
    net.Mask(img_mask, width, height, filter_mode='linear')
    # The CUDA buffers hold 4-channel (RGBA) float data.
    out_frame = jetson.utils.cudaToNumpy(img_overlay1, width, height, 4)
    out_mask = jetson.utils.cudaToNumpy(img_mask, width, height, 4)
    # BUGFIX: the arrays are RGBA, so use COLOR_RGBA2BGR (drops alpha);
    # the original COLOR_RGB2BGR expects a 3-channel input.
    out_frame = cv2.cvtColor(out_frame, cv2.COLOR_RGBA2BGR).astype(np.uint8)
    out_mask = cv2.cvtColor(out_mask, cv2.COLOR_RGBA2BGR).astype(np.uint8)
    return out_frame, out_mask
if __name__ == '__main__':
    # Configure globals, load the network, open the test video, and
    # allocate the CUDA buffers before entering the display loop.
    seg_params()
    net = seg_network()
    vs = cv2.VideoCapture(os.path.join(os.getcwd(), 'Lane_Videos', 'Video_009.mp4'))
    mem_alloc()
    while True:
        grabbed, frame = vs.read()
        if not grabbed:
            print('No frame captured')
            break
        img = frame.copy()
        # BUGFIX: segnet_inference returns (overlay, mask); the original
        # assigned the whole tuple to one name, so cv2.imshow was handed
        # a tuple instead of an image.
        out_frame, out_mask = segnet_inference(net, img)
        cv2.imshow('Output', out_frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
    vs.release()
    cv2.destroyAllWindows()
Later, due to hardware limitations, I decided to switch the development to Jetson TX2. After installing all the dependencies, I tried running the same code on Jetson TX2 but it always threw the following error.
One reason I could think of is the build date of jetson-inference. The one for the Nano was built around mid-June, whereas the one for the Jetson TX2 was built yesterday. The updated examples on GitHub indicate that the way inference is done has changed. Could anyone help me find a way around this issue with the above implementation?
Thank you