Hello,
I'm trying to deploy an optimized frozen TensorFlow model. The optimization was done with TensorRT.
I am following this Jupyter notebook:
https://github.com/ardianumam/Tensorflow-TensorRT/blob/master/7_optimizing_YOLOv3_using_TensorRT.ipynb
When I try to run the inference part below, the kernel dies at this line:
boxes, scores = sess.run(output_tensors, feed_dict={input_tensor: np.expand_dims(img_resized, axis=0)})
# Get the input and output tensors of the TensorRT-optimized frozen graph.
# Note: the curly "smart quotes" in the pasted original are syntax errors in
# Python; tensor names must be plain string literals.
input_tensor, output_tensors = utils.read_pb_return_tensors(
    tf.get_default_graph(),
    TENSORRT_YOLOv3_MODEL,
    ["Placeholder:0", "concat_9:0", "mul_9:0"])
# Perform inference frame-by-frame on the video, drawing detections and FPS.
# Cap GPU memory at 50% so TensorRT/TF can coexist with other GPU users.
with tf.Session(config=tf.ConfigProto(
        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.5))) as sess:
    # must use opencv >= 3.3.1 (install it by 'pip install opencv-python')
    vid = cv2.VideoCapture(video_path)
    while True:
        return_value, frame = vid.read()
        if not return_value:
            # End of stream (or read failure): reopen the capture once and
            # retry, which loops the video from the beginning.
            print('ret:', return_value)
            vid = cv2.VideoCapture(video_path)
            return_value, frame = vid.read()
        if return_value:
            image = Image.fromarray(frame)
        else:
            raise ValueError("No image!")

        # Resize to the network input size and scale pixels to [0, 1].
        # NOTE(review): frame comes from OpenCV in BGR order; the notebook
        # feeds it as-is — confirm the model was trained with BGR input.
        img_resized = np.array(image.resize(size=tuple(SIZE)),
                               dtype=np.float32)
        img_resized = img_resized / 255.

        prev_time = time.time()
        # Run the graph: add the batch dimension expected by the placeholder.
        boxes, scores = sess.run(
            output_tensors,
            feed_dict={input_tensor: np.expand_dims(img_resized, axis=0)})
        # Filter detections: confidence >= 0.4, then NMS with IoU 0.5.
        boxes, scores, labels = utils.cpu_nms(boxes,
                                              scores,
                                              num_classes,
                                              score_thresh=0.4,
                                              iou_thresh=0.5)
        image = utils.draw_boxes(image, boxes, scores, labels,
                                 classes, SIZE, show=False)
        curr_time = time.time()
        exec_time = curr_time - prev_time

        # Overlay per-frame latency and FPS, then display the result.
        result = np.asarray(image)
        info = "time:" + str(round(1000 * exec_time, 2)) + \
               " ms, FPS: " + str(round((1000 / (1000 * exec_time)), 1))
        cv2.putText(result, text=info, org=(50, 70),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1, color=(255, 0, 0), thickness=2)
        #cv2.namedWindow("result", cv2.WINDOW_AUTOSIZE)
        cv2.imshow("result", result)
        # Quit when the user presses 'q'.
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break