Using deepstream's image to process video files works fine, but rtsp input always fails

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU). GPU
• DeepStream Version 7.1
• JetPack Version (valid for Jetson only) docker
• TensorRT Version 10.3.0.26-1+cuda12.5
• NVIDIA GPU Driver Version (valid for GPU only) 575.51.03
• Issue Type( questions, new requirements, bugs) questions
• **How to reproduce the issue?**

My environment is Docker, based on nvcr.io/nvidia/deepstream:7.1-gc-triton-devel. The Dockerfile is:


FROM nvcr.io/nvidia/deepstream:7.1-gc-triton-devel

# OS packages: pip, ffmpeg, and the OpenCV runtime dependencies.
RUN apt-get update && apt-get install -y \
    python3-pip \
    ffmpeg \
    libopencv-dev \
    libgl1 \
    libglib2.0-0 \
    libsm6 && \
    apt-get clean && rm -rf /var/lib/apt/lists/*

# Python packages. NOTE: removed the duplicate "numpy" entry and the
# trailing backslash after "onnx", which left the RUN instruction with a
# dangling line continuation (the build would swallow whatever line
# followed it, or fail).
RUN pip3 install --no-cache-dir \
    pyzmq \
    pyyaml \
    numpy \
    opencv-python \
    msgpack \
    ultralytics \
    onnxruntime \
    onnx

My code is:


import math
import os
import sys
import time

import gi

gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst

import pyds

# ====== Input stream URIs ======
RTSP_STREAMS = [
    "rtsp://admin:admin123@168.168.34.3:554/cam/realmonitor?channel=39&subtype=0",
    "rtsp://admin:admin123@168.168.34.4:554/cam/realmonitor?channel=32&subtype=0"
]
# For a local video file input instead, use:
# RTSP_STREAMS = [
#     "file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.mp4"
# ]
YOLOV8_CONFIG = "/sgx/prod/estellar/agent/config/config_infer_primary_yoloV8.txt"  # nvinfer config file for the YOLOv8 primary GIE

# ====== infer callback ======
# ====== inference callback ======
def pgie_src_pad_buffer_probe(pad, info, u_data):
    """Buffer probe on nvinfer's src pad: print every detection.

    Walks the NvDsBatchMeta frame list and each frame's object list,
    printing stream index, frame number, class label, confidence and
    bounding box. Always returns OK so buffers keep flowing downstream.

    Args:
        pad: the Gst.Pad the probe is attached to (unused).
        info: Gst.PadProbeInfo carrying the GstBuffer.
        u_data: opaque user data (0 is passed at registration; unused).
    """
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer")
        return Gst.PadProbeReturn.OK
    # hash(gst_buffer) yields the underlying C pointer that pyds expects.
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        # Each node's .data must be cast to the concrete meta type.
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            class_id = obj_meta.class_id
            left = int(obj_meta.rect_params.left)
            top = int(obj_meta.rect_params.top)
            width = int(obj_meta.rect_params.width)
            height = int(obj_meta.rect_params.height)
            confidence = float(obj_meta.confidence)
            # Fall back to the numeric class id when no label is set.
            label = obj_meta.obj_label if obj_meta.obj_label else str(class_id)
            print(f"[infer] Stream {frame_meta.pad_index} | Frame {frame_number} | Class: {label} | Conf: {confidence:.3f} | BBox: [{left},{top},{width},{height}]")
            try:
                # pyds list nodes raise StopIteration at the end of the list.
                l_obj = l_obj.next
            except StopIteration:
                break
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK

# ====== create source bin ======
# ====== create source bin ======
def create_source_bin(index, uri):
    """Wrap a uridecodebin for *uri* in a Gst.Bin with one "src" ghost pad.

    The ghost pad is created target-less and retargeted from the
    decoder's "pad-added" signal once a decoded pad appears.

    Args:
        index: stream index, used only to name the bin ("source-bin-NN").
        uri: any URI uridecodebin accepts (rtsp://, file://, ...).

    Returns:
        The assembled Gst.Bin, ready to be added to a pipeline.
    """
    bin_name = f"source-bin-{index:02d}"
    nbin = Gst.Bin.new(bin_name)
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", None)
    uri_decode_bin.set_property("uri", uri)

    def _on_pad_added(dbin, pad, data):
        # BUGFIX: the old lambda retargeted the ghost pad on EVERY new
        # decoder pad. RTSP cameras usually expose an audio pad as well,
        # so the audio pad could steal the ghost-pad target and feed
        # non-video data to nvstreammux (file sources without audio
        # worked, matching the observed symptom). Only accept video pads.
        caps = pad.get_current_caps() or pad.query_caps(None)
        media = caps.get_structure(0).get_name()
        if not media.startswith("video"):
            return
        ghost = data.get_static_pad("src")
        if not ghost.set_target(pad):
            sys.stderr.write(f"{bin_name}: failed to set ghost pad target\n")

    uri_decode_bin.connect("pad-added", _on_pad_added, nbin)
    Gst.Bin.add(nbin, uri_decode_bin)
    nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    return nbin

# ====== main ======
# ====== main ======
def main():
    """Build and run the multi-stream DeepStream inference pipeline.

    Pipeline: N x uridecodebin -> nvstreammux -> queue -> nvinfer ->
    nvmultistreamtiler -> nvvideoconvert -> nvdsosd -> fakesink.
    Blocks in a GLib main loop until EOS, a pipeline error, or Ctrl-C,
    then tears the pipeline down.
    """
    # Work around the abort inside GIO's libproxy module
    # (libgiolibproxy.so -> px_proxy_factory_get_proxies -> abort) that
    # fires when uridecodebin resolves RTSP URIs in this container.
    # Equivalent to `export GIO_USE_PROXY_RESOLVER=0` before launch.
    os.environ.setdefault("GIO_USE_PROXY_RESOLVER", "0")

    Gst.init(None)

    def _make(factory, name=None):
        # Element creation returns None on failure; fail fast with a
        # clear message instead of an AttributeError later.
        elem = Gst.ElementFactory.make(factory, name)
        if not elem:
            raise RuntimeError(f"Unable to create GStreamer element '{factory}'")
        return elem

    pipeline = Gst.Pipeline()
    if not pipeline:
        raise RuntimeError("Unable to create Gst.Pipeline")

    streammux = _make("nvstreammux", "stream-muxer")
    streammux.set_property('batch-size', len(RTSP_STREAMS))
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batched-push-timeout', 33000)
    # live-source=1 for RTSP (live) input; set 0 when reading from files.
    streammux.set_property('live-source', 1)
    pipeline.add(streammux)

    # One source bin per URI, each linked to a requested muxer sink pad.
    for i, uri in enumerate(RTSP_STREAMS):
        src_bin = create_source_bin(i, uri)
        pipeline.add(src_bin)
        sinkpad = streammux.request_pad_simple(f"sink_{i}")
        srcpad = src_bin.get_static_pad("src")
        if srcpad.link(sinkpad) != Gst.PadLinkReturn.OK:
            raise RuntimeError(f"Failed to link source bin {i} to streammux")

    queue1 = _make("queue")
    pgie = _make("nvinfer", "primary-inference")
    pgie.set_property('config-file-path', YOLOV8_CONFIG)
    tiler = _make("nvmultistreamtiler")
    tiler.set_property('rows', 1)
    tiler.set_property('columns', len(RTSP_STREAMS))
    tiler.set_property('width', 1280)
    tiler.set_property('height', 720)
    nvvidconv = _make("nvvideoconvert")
    nvosd = _make("nvdsosd")
    sink = _make("fakesink")
    for elem in (queue1, pgie, tiler, nvvidconv, nvosd, sink):
        pipeline.add(elem)
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(sink)

    # Print detections as batches leave the inference element.
    pgie_src_pad = pgie.get_static_pad("src")
    pgie_src_pad.add_probe(Gst.PadProbeType.BUFFER, pgie_src_pad_buffer_probe, 0)

    # Bus watch: quit the main loop on EOS or error.
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()

    def bus_call(bus, message, loop):
        t = message.type
        if t == Gst.MessageType.EOS:
            print('End-of-stream')
            loop.quit()
        elif t == Gst.MessageType.ERROR:
            err, debug = message.parse_error()
            print(f'Error: {err}, {debug}')
            loop.quit()
        return True

    bus.connect("message", bus_call, loop)

    print("start pipeline ...")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        # Ctrl-C: fall through to the clean teardown below. The old bare
        # `except:` silently swallowed every exception, hiding real bugs.
        pass
    finally:
        pipeline.set_state(Gst.State.NULL)
    print("quit")

# Script entry point: run the pipeline only when executed directly.
if __name__ == "__main__":
    main()

the error is:

root@2288H-V6:/sgx/prod/estellar/agent# python3 test_rtsp_inter.py 
start pipeline ...
0:00:00.396288068  3598 0x5a379a2980f0 INFO                 nvinfer gstnvinfer.cpp:684:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:2092> [UID = 1]: deserialized trt engine from :/sgx/prod/estellar/agent/models/yolov8s_ds.engine
Implicit layer support has been deprecated
INFO: ../nvdsinfer/nvdsinfer_model_builder.cpp:327 [Implicit Engine Info]: layers num: 0

0:00:00.396366568  3598 0x5a379a2980f0 INFO                 nvinfer gstnvinfer.cpp:684:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2195> [UID = 1]: Use deserialized engine model: /sgx/prod/estellar/agent/models/yolov8s_ds.engine
0:00:00.404686756  3598 0x5a379a2980f0 INFO                 nvinfer gstnvinfer_impl.cpp:343:notifyLoadModelStatus:<primary-inference> [UID 1]: Load new model:/sgx/prod/estellar/agent/config/config_infer_primary_yoloV8.txt sucessfully
Aborted (core dumped)

If I change the RTSP input to a video file, this code runs OK.

config_infer_primary_yoloV8.txt is:

[property]
gpu-id=0
net-scale-factor=0.0039215697906911373
model-color-format=0
#onnx-file=/sgx/prod/estellar/agent/models/yolov8s.onnx
model-engine-file=/sgx/prod/estellar/agent/models/yolov8s_ds.engine
#int8-calib-file=calib.table
labelfile-path=/sgx/prod/estellar/agent/models/labels.txt
batch-size=1
network-mode=0
num-detected-classes=80
interval=0
gie-unique-id=1
process-mode=1
network-type=0
cluster-mode=2
maintain-aspect-ratio=1
symmetric-padding=1
#workspace-size=2000
parse-bbox-func-name=NvDsInferParseYolo
#parse-bbox-func-name=NvDsInferParseYoloCuda
custom-lib-path=/sgx/prod/estellar/agent/lib/libnvdsinfer_custom_impl_Yolo.so
engine-create-func-name=NvDsInferYoloCudaEngineGet

[class-attrs-all]
nms-iou-threshold=0.45
pre-cluster-threshold=0.25
topk=300

how to fix it? thanks

debug using GDB:


(gdb) bt
#0  0x00007ffff7cdd9fc in pthread_kill () at /lib/x86_64-linux-gnu/libc.so.6
#1  0x00007ffff7c89476 in raise () at /lib/x86_64-linux-gnu/libc.so.6
#2  0x00007ffff7c6f7f3 in abort () at /lib/x86_64-linux-gnu/libc.so.6
#3  0x00007ffff682c692 in  () at /lib/x86_64-linux-gnu/libstdc++.so.6
#4  0x00007ffff68379da in __gxx_personality_v0 () at /lib/x86_64-linux-gnu/libstdc++.so.6
#5  0x00007ffff6759fe9 in __libunwind_Unwind_Resume () at /lib/x86_64-linux-gnu/libunwind.so.8
#6  0x00007fffd1f0986d in  () at /lib/x86_64-linux-gnu/libproxy.so.1
#7  0x00007fffd1f12827 in px_proxy_factory_get_proxies () at /lib/x86_64-linux-gnu/libproxy.so.1
#8  0x00007fffd6a32827 in  () at /usr/lib/x86_64-linux-gnu/gio/modules/libgiolibproxy.so
#9  0x00007ffff7456644 in g_task_thread_pool_thread (thread_data=0x7fff80001c50, pool_data=<optimized out>) at ../gio/gtask.c:1531
#10 0x00007ffff7739384 in g_thread_pool_thread_proxy (data=<optimized out>) at ../glib/gthreadpool.c:350
#11 0x00007ffff7738ac1 in g_thread_proxy (data=0x7fff900015f0) at ../glib/gthread.c:831
#12 0x00007ffff7cdbac3 in  () at /lib/x86_64-linux-gnu/libc.so.6
#13 0x00007ffff7d6d850 in  () at /lib/x86_64-linux-gnu/libc.so.6
(gdb) 


The error is caused by libproxy.so.1 / libgiolibproxy.so.

resolved by

export GIO_USE_PROXY_RESOLVER=0
mv /usr/lib/x86_64-linux-gnu/gio/modules/libgiolibproxy.so /usr/lib/x86_64-linux-gnu/gio/modules/libgiolibproxy.so.bak

Sorry for the late reply. Is this still a DeepStream issue that needs support? Thanks!

I am still developing and have encountered a new crash problem: after adding target tracking (nvtracker), the pipeline runs unstably and the process crashes quickly. I will try to solve it myself first. Thanks.

Thanks for the update! Is this still a DeepStream issue that needs support? Thanks!

I have finished my work, thanks. The config is very important.

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.