Video stream picture encoding error

Please provide complete information as applicable to your setup.

• Hardware Platform: Jetson
• DeepStream Version: 5.1
• JetPack Version (valid for Jetson only): 4.5.1
• TensorRT Version: 7.x
• Issue Type (questions, new requirements, bugs): questions
Hi:
I connected 8 channels of 720p RTSP video streams simultaneously, using the Python bindings. The backbone network is CenterFace, which is used to detect, crop, and save faces. However, the saved faces are very blurred. This blurring does not appear in the displayed video itself.
9a411d4b-2078-4291-bf4f-d601cf60fc65
This is the camera parameter configuration
image

image

When I run the official example (source8_1080p_dec_infer-resnet_tracker_tiled_display_fp16_nano.txt), this problem does not occur.

How do you capture and save faces? Which deepstream sample code are you using?

This is my pipe setup

# Primary inference engine (nvinfer) -- runs the CenterFace face detector.
print("Creating Pgie \n ")
pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
if not pgie:
    sys.stderr.write(" Unable to create pgie \n")
# Video converter placed upstream of the detector; it also crops the input.
print("Creating nvvidconv1 \n ")
nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
if not nvvidconv1:
    sys.stderr.write(" Unable to create nvvidconv1 \n")
# src-crop is presumably "left:top:width:height" per nvvideoconvert docs,
# i.e. a 1280x720 region starting at (320, 200) -- verify.
# NOTE(review): cropping here and letting downstream elements rescale may
# be what degrades the saved face quality -- confirm.
nvvidconv1.set_property("src-crop","320:200:1280:720")
print("Creating filter1 \n ")
# Force RGBA format in NVMM memory so the probe's
# pyds.get_nvds_buf_surface() call can map the frame as an RGBA array.
caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
if not filter1:
    sys.stderr.write(" Unable to get the caps filter1 \n")
filter1.set_property("caps", caps1)

# Multi-object tracker (nvtracker); its properties are loaded later from
# dstest2_tracker_config.txt.
tracker = Gst.ElementFactory.make("nvtracker", "tracker")
if not tracker:
    sys.stderr.write(" Unable to create tracker \n")

# Tiler composites the 8 batched streams into one grid for display.
print("Creating tiler \n ")
tiler=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
if not tiler:
    sys.stderr.write(" Unable to create tiler \n")
# Second converter, placed between the tiler and the on-screen display.
print("Creating nvvidconv \n ")
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
if not nvvidconv:
    sys.stderr.write(" Unable to create nvvidconv \n")
# On-screen display draws bounding boxes/labels from the metadata.
print("Creating nvosd \n ")
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
if not nvosd:
    sys.stderr.write(" Unable to create nvosd \n")

print("Creating EGLSink \n")
# Rendering sink (nvoverlaysink, hardware overlay output on Jetson).
sink = Gst.ElementFactory.make("nvoverlaysink", "nvvideo-renderer")
# BUGFIX: check for creation failure BEFORE touching the element.  The
# original code called set_property() first, so a failed make() raised
# AttributeError on None before the error message could ever be written.
if not sink:
    sys.stderr.write(" Unable to create egl sink \n")
sink.set_property('sync',False)       # don't block on clock sync (live RTSP)
sink.set_property('overlay-x',150)    # overlay window position and size
sink.set_property('overlay-y',10)
sink.set_property('overlay-w',1280)
sink.set_property('overlay-h',720)

# Live RTSP sources: tell the muxer frames arrive in real time.
if is_live:
    print("Atleast one of the sources is live")
    streammux.set_property('live-source', 1)

# Muxer output resolution and batching for the 8 x 720p RTSP inputs.
streammux.set_property('width', 1280)
streammux.set_property('height', 720)
streammux.set_property('batch-size', 8)
# Timeout before pushing a partially-filled batch (microseconds, per
# the nvstreammux plugin docs -- verify for this DeepStream version).
streammux.set_property('batched-push-timeout', 400000)

# Primary detector config; force the engine batch size to match the
# 8 sources if the config file disagrees.
pgie.set_property('config-file-path', "./primaryInference/dstest3_pgie_config.txt")
pgie_batch_size=pgie.get_property("batch-size")
if(pgie_batch_size != 8):
    print("WARNING: Overriding infer-config batch-size",pgie_batch_size," with number of sources ", 8," \n")
    pgie.set_property("batch-size",8)

# Secondary classifier: same batch-size override.  NOTE(review):
# face_classifier is created elsewhere (not visible in this excerpt).
face_classifier.set_property('config-file-path', "dstest2_sgie1_config.txt")
face_classifier_batch_size=face_classifier.get_property("batch-size")
if(face_classifier_batch_size != 8):
    print("WARNING: Overriding infer-config batch-size",face_classifier_batch_size," with number of sources ", 8," \n")
    face_classifier.set_property("batch-size",8)

# Tiler grid for 8 sources: rows = int(sqrt(8)) = 2,
# columns = ceil(8 / 2) = 4.
tiler_rows=int(math.sqrt(8))
tiler_columns=int(math.ceil((1.0*8)/tiler_rows))
tiler.set_property("rows",tiler_rows)
tiler.set_property("columns",tiler_columns)
tiler.set_property("width", TILED_OUTPUT_WIDTH)
tiler.set_property("height", TILED_OUTPUT_HEIGHT)
# Disable QoS so the sink does not request frame drops under load.
sink.set_property("qos",0)

# Set properties of tracker from the [tracker] section of its config file.
config = configparser.ConfigParser()
config.read('dstest2_tracker_config.txt')
config.sections()

# Dispatch table: config key -> (reader, GObject property name).
# Property names are kept exactly as the original code passed them
# (PyGObject accepts underscore spellings for dashed property names).
_tracker_props = {
    'tracker-width': (config.getint, 'tracker-width'),
    'tracker-height': (config.getint, 'tracker-height'),
    'gpu-id': (config.getint, 'gpu_id'),
    'll-lib-file': (config.get, 'll-lib-file'),
    'll-config-file': (config.get, 'll-config-file'),
    'enable-batch-process': (config.getint, 'enable_batch_process'),
    'enable-past-frame': (config.getint, 'enable_past_frame'),
}
for key in config['tracker']:
    if key in _tracker_props:
        reader, prop_name = _tracker_props[key]
        tracker.set_property(prop_name, reader('tracker', key))

print("Adding elements to Pipeline \n")
# Add every element to the pipeline, in the same order they will be linked.
for element in (pgie, tracker, nvvidconv1, filter1,
                tiler, nvvidconv, nvosd, sink):
    pipeline.add(element)

print("Linking elements in the Pipeline \n")
# Full chain:
#   streammux -> queue1 -> nvvidconv1 (crop) -> queue2 -> pgie -> queue3
#   -> filter1 (RGBA caps) -> queue4 -> tracker -> queue5 -> tiler
#   -> queue6 -> nvvidconv -> queue7 -> nvosd -> queue8 -> sink
chain = (streammux, queue1, nvvidconv1, queue2, pgie, queue3,
         filter1, queue4, tracker, queue5, tiler, queue6,
         nvvidconv, queue7, nvosd, queue8, sink)
# Link each consecutive pair in order.
for upstream, downstream in zip(chain, chain[1:]):
    upstream.link(downstream)
   
# Create an event loop and feed GStreamer bus messages to it.
loop = GObject.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect ("message", bus_call, loop)


# Attach the face-cropping probe to the SRC pad of queue5 (the queue
# immediately downstream of nvtracker).
# BUGFIX: the original variable was misleadingly named "queue3srcpad"
# and the error message claimed "sink pad of nvvidconv" -- both referred
# to the wrong pad/element and would misdirect debugging.
queue5srcpad = queue5.get_static_pad("src")
if not queue5srcpad:
    sys.stderr.write(" Unable to get src pad of queue5 \n")
else:
    queue5srcpad.add_probe(Gst.PadProbeType.BUFFER, sgie_sink_pad_buffer_probe, 0)

In the primary network, I use the face coordinates produced by CenterFace to crop the face directly out of the original image:

def sgie_sink_pad_buffer_probe(pad, info, u_data):
    """Buffer-pad probe that crops each detected face (with a 100 px
    margin) out of the mapped frame and converts it to BGRA for saving.

    BUGFIXES vs. the pasted original:
      * body lines had lost their indentation (invalid Python);
      * the early exit used a bare ``return`` (None) instead of a valid
        ``Gst.PadProbeReturn`` value;
      * a C-style ``//`` comment was not valid Python syntax.
    """
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return Gst.PadProbeReturn.OK

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        l_obj = frame_meta.obj_meta_list
        # RGBA view of this batch slot's frame (requires the upstream
        # capsfilter forcing RGBA in NVMM memory).
        n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)

        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            rect_params = obj_meta.rect_params
            h, w, _ = n_frame.shape
            # Expand the detector bbox by 100 px on every side, clamped
            # to the frame boundaries.
            top = max(int(rect_params.top) - 100, 0)
            left = max(int(rect_params.left) - 100, 0)
            right = min(int(rect_params.width + rect_params.left) + 100, w)
            bottom = min(int(rect_params.height + rect_params.top) + 100, h)
            face = n_frame[top: bottom, left: right]
            # Copy out of the mapped surface before the buffer moves on.
            face = np.array(face, copy=True, order='C')
            face_copy = cv2.cvtColor(face, cv2.COLOR_RGBA2BGRA)

            # ==================   Save face image  =====================

            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK

You crop the original video and it is then scaled, so the image quality is degraded.

This topic was automatically closed 60 days after the last reply. New replies are no longer allowed.