Hi i converted the yolov3-mobilenetv2 tlt model to trt engine with tlt-convertor.
This trt engine giving good results with infering through tlt-infer and deepstream. Next I tried to infer the same TensorRT model via python standalone script.But when i infer with script, i got more false positives.
Here i add my preprocesing and post processing script.
can you ,let me know if any mistakes i need to correct here?
Preprocessing steps
def process_image(self,arr):
image_resized=cv2.resize(arr,(self.model_w, self.model_h))
img_np = image_resized.astype(np.float32)
# HWC -> CHW
img_np = img_np.transpose((2, 0, 1))
img_np = img_np.ravel()
return img_np
Postprocessing steps
def _nms_boxes(self, boxes, box_confidences):
x_coord = boxes[:, 0]
y_coord = boxes[:, 1]
width = boxes[:, 2]
height = boxes[:, 3]
areas = width * height
ordered = box_confidences.argsort()[::-1]
keep = list()
while ordered.size > 0:
# Index of the current element:
i = ordered[0]
keep.append(i)
xx1 = np.maximum(x_coord[i], x_coord[ordered[1:]])
yy1 = np.maximum(y_coord[i], y_coord[ordered[1:]])
xx2 = np.minimum(x_coord[i] + width[i], x_coord[ordered[1:]] + width[ordered[1:]])
yy2 = np.minimum(y_coord[i] + height[i], y_coord[ordered[1:]] + height[ordered[1:]])
width1 = np.maximum(0.0, xx2 - xx1 + 1)
height1 = np.maximum(0.0, yy2 - yy1 + 1)
intersection = width1 * height1
union = (areas[i] + areas[ordered[1:]] - intersection)
# Compute the Intersection over Union (IoU) score:
iou = intersection / union
# The goal of the NMS algorithm is to reduce the number of adjacent bounding-box
# candidates to a minimum. In this step, we keep only those elements whose overlap
# with the current bounding box is lower than the threshold:
indexes = np.where(iou <= self.nms_threshold)[0]
ordered = ordered[indexes + 1]
keep = np.array(keep)
return keep
def postprocess(self, outputs, wh_format=True):
"""
Postprocesses the inference output
Args:
outputs (list of float): inference output
min_confidence (float): min confidence to accept detection
analysis_classes (list of int): indices of the classes to consider
Returns: list of list tuple: each element is a two list tuple (x, y) representing the corners of a bb
"""
p_keep_count = outputs[0]
p_bboxes = outputs[1]
p_scores = outputs[2]
p_classes = outputs[3]
analysis_classes = list(range(self.NUM_CLASSES))
threshold = self.min_confidence
p_bboxes = np.array_split(p_bboxes,len(p_bboxes)/4)
bbs = []
class_ids = []
scores = []
x_scale = self.img_shape[1] / self.model_w
y_scale = self.img_shape[0] / self.model_h
for i in range(p_keep_count[0]):
assert(p_classes[i] < len(analysis_classes))
if p_scores[i]>threshold:
x1 = int(np.round(p_bboxes[i][0]*x_scale))
y1 = int(np.round(p_bboxes[i][1]*y_scale))
x2 = int(np.round(p_bboxes[i][2]*x_scale))
y2 = int(np.round(p_bboxes[i][3]*y_scale))
bbs.append([x1,y1,x2,y2])
class_ids.append(p_classes[i])
scores.append(p_scores[i])
#print(class_ids)
bbs = np.asarray(bbs)
class_ids = np.asarray(class_ids)
scores = np.asarray(scores)
nms_boxes, nms_categories, nscores = list(), list(), list()
for category in set(class_ids):
idxs = np.where(class_ids == category)
box = bbs[idxs]
category = class_ids[idxs]
confidence = scores[idxs]
keep = self._nms_boxes(box, confidence)
nms_boxes.append(box[keep])
nms_categories.append(category[keep])
nscores.append(confidence[keep])
if len(nms_boxes)==0:
return [],[],[]
return nms_boxes, nms_categories, nscores