• Hardware Platform (Jetson / GPU) Jetson Xavier NX
• DeepStream Version 5.0
• JetPack Version (valid for Jetson only) R32 Revision: 5.0 GCID: 25531747 Board: t186ref
• TensorRT Version 7.1.3 + CUDA 10.2
• Issue Type( questions, new requirements, bugs) Please see below
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing) please see below
• Requirement details( This is for new requirement. Including the module name-for which plugin or for which sample application, the function description) please see below
Using Python deepstream-imagedata-multistream app as an example, I have created my own app. I noticed that the app would fail silently, and based on what I saw from the log, this seems to be the issue.
[2021-03-26T08:49:28.549-07:00][INFO]-gstnvtracker: NvBufSurfTransform failed with error -2 while converting buffergstnvtracker: Failed to convert input batch.
[2021-03-26T08:49:28.549-07:00][ERROR]-SYNC_IOC_FENCE_INFO ioctl failed with 9
[2021-03-26T08:49:28.589-07:00][ERROR]-58:04:05.548567353 e[332m 5569e[00m 0x33445770 e[33;01mWARN e[00m e[00m nvinfer gstnvinfer.cpp:1984:gst_nvinfer_output_loop:e[00m error: Internal data stream error.
[2021-03-26T08:49:28.589-07:00][ERROR]-58:04:05.548703833 e[332m 5569e[00m 0x33445770 e[33;01mWARN e[00m e[00m nvinfer gstnvinfer.cpp:1984:gst_nvinfer_output_loop:e[00m error: streaming stopped, reason error (-5)
[2021-03-26T08:49:28.594-07:00][ERROR]-bus_call.py:37,Error: gst-stream-error-quark: Failed to submit input to tracker (1): /dvs/git/dirty/git-master_linux/deepstream/sdk/src/gst-plugins/gst-nvtracker2/gstnvtracker.cpp(581): gst_nv_tracker_submit_input_buffer (): /GstPipeline:pipeline0/GstNvTracker:tracker
My guess is that this error is happening when we are performing the following -
n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
frame_image = np.array(n_frame, copy=True, order='C')
frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA)
This is performed when we find something of interest. We use frame_image
to save and send to the cloud.
When the error related to NvBufSurfTransform
shows up, it seems to stop getting the feed from the camera. Is there anything obvious that I may have implemented incorrectly?
Please see below for the snippets of the code as well as actual code in the attached docs.
Would appreciate any pointer.
Thank you!
> def tiler_sink_pad_buffer_probe(pad, info, u_data):
> global trackableObjects
> global objectTrackingTotalFrames
> global ss
> global ct
> global directionInfo
>
> frame_number = 0
> num_rects = 0
> counting_car = 0
> trackers = []
> all_result = {}
>
> gst_buffer = info.get_buffer()
> if not gst_buffer:
> print("Unable to get GstBuffer ")
> return
>
> # Retrieve batch metadata from the gst_buffer
> # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
> # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
> batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
> l_frame = batch_meta.frame_meta_list
> while l_frame is not None:
> try:
> # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
> # The casting is done by pyds.NvDsFrameMeta.cast()
> # The casting also keeps ownership of the underlying memory
> # in the C code, so the Python garbage collector will leave
> # it alone.
> frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
> except StopIteration:
> break
>
> frame_number = frame_meta.frame_num
> l_obj = frame_meta.obj_meta_list
> num_rects = frame_meta.num_obj_meta
> is_first_obj = True
> save_image = False
> dc = direction_counter_module.DirectionCounter(
> 'horizontal', frame_meta.source_frame_height, frame_meta.source_frame_width)
>
> while l_obj is not None:
> try:
> # Casting l_obj.data to pyds.NvDsObjectMeta
> obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
> except StopIteration:
> print('********* [ERROR] happened at location 1 *********')
> break
> # obj_counter[obj_meta.class_id] += 1
>
> # Periodically check for objects with borderline confidence value that may be false positive detections.
> # If such detections are found, annoate the frame with bboxes and confidence value.
> # Save the annotated frame to file.
> if(pgie_classes_str[obj_meta.class_id] == "license_plate[dstest3_pgie_config.txt|attachment](upload://2iQ31E73aI6grLV2RLI4eeHpQgO.txt) (3.6 KB) [modified.py|attachment](upload://2GmrT9sca1uZ9Ck8Oh7jIZ98BU2.py) (28.0 KB) "):
>
> if is_first_obj:
> # check if this is the first object in the frame, this way, we only save once per frame
> is_first_obj = False
> # Getting Image data using nvbufsurface
> # the input should be address of buffer and batch_id
> # print('[------------] inside of is_first_obj [------------]')
> n_frame = pyds.get_nvds_buf_surface(
> hash(gst_buffer), frame_meta.batch_id)
> # convert python array into numy array format.
> frame_image = np.array(n_frame, copy=True, order='C')
> # covert the array into cv2 default color format
> frame_image = cv2.cvtColor(
> frame_image, cv2.COLOR_RGBA2BGRA)
>
> save_image = True
> flash_on()
>
> rect_params = obj_meta.rect_params
> startY = int(rect_params.top)
> startX = int(rect_params.left)
> width = int(rect_params.width)
> height = int(rect_params.height)
> endY = startY + height
> endX = startX + width
> score = str(int(abs(obj_meta.confidence) * 100))
>
> all_result[score] = (startX, startY, endX, endY)
>
> try:
> l_obj = l_obj.next
> except StopIteration:
> print('********* [ERROR] happened at location 5 *********')
> break
>
>
> # Out of all the objects/rectangles we found, track the only one with highest confidence level
> if bool(all_result):
> sorted_result = list(all_result)
> sorted_result.sort(reverse=True)
> trackers.append(all_result[sorted_result[0]])
>
> # use the centroid tracker to associate the (1) old object centroids with (2) the newly computed object centroids
> objects = ct.update(
> trackers, objectTrackingTotalFrames)
>
> if save_image:
> # loop over the tracked objects
> for (objectID, centroid) in objects.items():
> to = trackableObjects.get(objectID, None)
>
> if to is None:
> # print(
> # '[INFO - save_image] no existing trackable object with that ID ... creating one')
> to = tracker_object_module.TrackableObject(
> objectID, centroid)
>
> else:
> # print(
> # '[INFO - save_image] found an object with that ID')
> to.centroids.append(centroid)
>
> print(to.counted)
>
> # using the first 3 frames to set the direction
> if to.counted <= 2:
>
> dc.find_direction(to, centroid)
> # find the direction of motion which will tell us whether to send to entrance or exit
> # if door is not "ENTRANCE":
> directionInfo = dc.count_object(
> to, centroid, door)
>
> elif 3 <= to.counted <= 7:
> loc_dt = datetime.datetime.now(
> tz=dateutil.tz.gettz(timezone))
> localized_time = loc_dt.strftime("%Y-%m-%d-%H-%M-%S")
> localized_time_yr = loc_dt.strftime("%Y")
> localized_time_month = loc_dt.strftime("%m")
> localized_time_day = loc_dt.strftime("%d")
> door_short = "EN" if door == "ENTRANCE" else "EX"
> door_new = door if directionInfo and directionInfo == door_short else "EXIT" if door == "ENTRANCE" else "ENTRANCE"
> folder_name = f'public/{company_name}/{place}/{door_new}/{localized_time_yr}/{localized_time_month}/{localized_time_day}/{objectID}'
>
> local_directory = device_user_name
> entire_path = local_directory + folder_name
>
> os.umask(0)
> os.makedirs(entire_path, mode=0o777,
> exist_ok=True)
>
> door_short_new = door_new[:2]
>
> filename = f'_{localized_time}_{objectID}_{door_short_new}_{score}_{startX}_{startY}_{endX}_{endY}'
>
> ss.start(folder_name, filename,
> frame_image, to.counted, s3_bucket)
>
> if not ss.sending:
> ss.finish()
>
> trackableObjects[objectID] = to
>
> to.counted += 1
>
> objectTrackingTotalFrames += 1
>
> try:
> l_frame = l_frame.next
> except StopIteration:
> print('********* [ERROR] happened at location 6 *********')
> break
>
> return Gst.PadProbeReturn.OK
dstest3_pgie_config.txt (3.6 KB) modified.py (28.0 KB)