Host: Ubuntu 24.04 LTS
DeepStream container: Ubuntu 22.04 LTS
NVIDIA-SMI: 535.183.06
Driver Version: 535.183.06
CUDA Version: 12.2
Previously, when I ran this container on my Ubuntu 22.04 machine, everything worked normally. This time I had to run the same container on Ubuntu 24.04, and it ran into a problem.
Even with export GST_DEBUG=3, no errors are displayed.
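For reference, the equivalent debug setup from Python (a sketch; the per-category level for uridecodebin is just an example of what could be raised to trace autoplugging in more detail):

```
import os

# Must be set before Gst.init() is called
os.environ["GST_DEBUG"] = "3,uridecodebin:6"
# Optional: enables pipeline graph dumps via Gst.debug_bin_to_dot_file()
os.environ["GST_DEBUG_DUMP_DOT_DIR"] = "/tmp"
```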
The main function is:
```
def run(self):
    # Standard GStreamer initialization
    Gst.init(None)

    # Create the Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    self.pipeline = Gst.Pipeline()
    is_live = False
    if not self.pipeline:
        self.logger.error(" Unable to create Pipeline \n")
        return

    print("Creating streammux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    self.streammux = self._create_nvstreammux()
    if not self.streammux:
        self.logger.error(" Unable to create NvStreamMux \n")
        return

    if self.video_url.find("rtsp://") == 0:
        is_live = True
        print("At least one of the sources is live")
        self.streammux.set_property('live-source', 1)

    # Create the first source bin and add it to the pipeline
    source_bin = self._create_uridecode_bin(0, self.video_url)
    if not source_bin:
        self.logger.error("Failed to create source bin. Exiting. \n")
        return
    self.g_source_bins[0] = source_bin
    self.pipeline.add(source_bin)

    print("Creating Pgie \n ")
    pgie = self._create_element("nvinfer", "primary-inference", "primary-inference")
    if not pgie:
        self.logger.error(" Unable to create pgie \n")
        return
    # Set the pgie configuration file path and required properties
    pgie.set_property('config-file-path', self.inferConfigFile)
    pgie.set_property("batch-size", self.MAX_SOURCE_COUNT)
    # Set the GPU ID of the inference engine
    pgie.set_property("gpu_id", 0)

    print("Creating nvvidconv1 \n ")
    nvvidconv1 = self._create_element("nvvideoconvert", "convertor1", "nvvideoconvert")
    if not nvvidconv1:
        self.logger.error(" Unable to create nvvidconv1 \n")
        return

    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = self._create_element("capsfilter", "filter1", "capsfilter")
    if not filter1:
        self.logger.error(" Unable to get the caps filter1 \n")
        return
    filter1.set_property("caps", caps1)

    print("Creating nvosd \n ")
    nvosd = self._create_element("nvdsosd", "onscreendisplay", "nvdsosd")
    if not nvosd:
        self.logger.error(" Unable to create nvosd \n")
        return
    # Set the GPU ID of nvosd
    nvosd.set_property("gpu_id", 0)

    sink = self._create_element("fakesink", "fake-sink", "fake-sink")
    if not sink:
        self.logger.error(" Unable to create fakesink \n")
        return
    sink.set_property("sync", 0)
    sink.set_property("qos", 0)

    # Link the elements in the following order:
    # source_bin -> streammux -> nvinfer -> nvvideoconvert -> capsfilter -> nvdsosd -> fakesink
    print("Linking elements in the Pipeline \n")
    self.streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(nvosd)
    nvosd.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GLib.MainLoop()
    bus = self.pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", self._bus_call, loop)

    print("----------pipeline set_state(Gst.State.PAUSED) ----------")
    self.pipeline.set_state(Gst.State.PAUSED)

    # Add probes to the sink pad of the osd element; by this point the
    # buffer will have all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
        return
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, self._save_frame, 0)
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, self._tiler_sink_pad_buffer_probe, 0)

    print("Starting pipeline \n")
    # Start playback and listen to events
    self.pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except BaseException:
        pass

    # Cleanup
    print("Exiting app\n")
    self.pipeline.set_state(Gst.State.NULL)
```
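For context, `_bus_call` follows the usual bus-handler pattern from the DeepStream Python samples; a minimal sketch (assumed, since the real handler isn't shown above):

```
def _bus_call(self, bus, message, loop):
    t = message.type
    if t == Gst.MessageType.EOS:
        print("End-of-stream\n")
        loop.quit()
    elif t == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        print("Warning: %s: %s" % (err, debug))
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        print("Error: %s: %s" % (err, debug))
        loop.quit()
    # Returning True keeps the watch installed
    return True
```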
When this function executes, the printed output looks normal.
I think the problem is here:
```
def _create_uridecode_bin(self, source_id, url):
    def decodebin_child_added(child_proxy, Object, name, user_data):
        print("Decodebin child added:", name, "\n")
        if name.find("decodebin") != -1:
            Object.connect("child-added", decodebin_child_added, user_data)
        if name.find("nvv4l2decoder") != -1:
            Object.set_property("gpu_id", 0)
            Object.set_property("drop-frame-interval", self.drop_frame_interval)

    def cb_newpad(decodebin, pad, data):
        print("In cb_newpad\n")
        caps = pad.get_current_caps()
        gststruct = caps.get_structure(0)
        gstname = gststruct.get_name()
        # Check whether the pad created by the decodebin is for video, not audio.
        print("gstname=", gstname)
        if gstname.find("video") != -1:
            source_id = data
            pad_name = "sink_%u" % source_id
            print(pad_name)
            # Request a sink pad from the streammux and link the decodebin pad to it
            sinkpad = self.streammux.request_pad_simple(pad_name)
            if not sinkpad:
                print("Decodebin link to pipeline error 1", sinkpad)
                self.logger.error("Unable to create sink pad bin \n")
                return
            if pad.link(sinkpad) == Gst.PadLinkReturn.OK:
                print("Decodebin linked to pipeline")
            else:
                print("Decodebin link to pipeline error 2", sinkpad)
                self.logger.error("Failed to link decodebin to pipeline\n")

    print("Creating uridecodebin for [%s]" % url)
    # Create a source GstBin to abstract this bin's content from the rest
    # of the pipeline
    bin_name = "source-bin-%02d" % source_id
    print(bin_name)
    # Source element for reading from the uri. We use uridecodebin and let
    # it figure out the container format of the stream and the codec, and
    # plug the appropriate demux and decode plugins.
    bin = Gst.ElementFactory.make("uridecodebin", bin_name)
    print('----------------bin is {}'.format(bin))
    if not bin:
        self.logger.error(" Unable to create uri decode bin \n")
        return None
    # Set the input uri on the source element
    bin.set_property("uri", url)
    # Connect to the "pad-added" signal of the decodebin, which fires once
    # a new pad for raw data has been created by the decodebin
    bin.connect("pad-added", cb_newpad, source_id)
    bin.connect("child-added", decodebin_child_added, source_id)
    return bin
```
The cb_newpad callback is never triggered here: nothing is printed from it, but there are also no errors.
Under normal circumstances it should print something like "In cb_newpad\n" followed by the requested pad name from print(pad_name).
Can you give me some ideas for troubleshooting this?
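One check I can think of is dumping the pipeline graph once the pipeline has tried to reach PAUSED, to see where negotiation stops. A minimal sketch, assuming GST_DEBUG_DUMP_DOT_DIR is exported before Gst.init():

```
self.pipeline.set_state(Gst.State.PAUSED)
# Wait (with a timeout) for the async state change instead of blocking forever
self.pipeline.get_state(5 * Gst.SECOND)
# Writes $GST_DEBUG_DUMP_DOT_DIR/pipeline-paused.dot for inspection with graphviz
Gst.debug_bin_to_dot_file(self.pipeline, Gst.DebugGraphDetails.ALL, "pipeline-paused")
```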
The printed output looks like this:
```
Creating Pipeline
Creating streammux
Creating uridecodebin for [rtmp://127.0.0.1:10936/test/1]
source-bin-00
----------------bin is <__gi__.GstURIDecodeBin object at 0x74059a801640 (GstURIDecodeBin at 0x6524e624e080)> 0 rtmp://127.0.0.1:10936/test/1
Creating Pgie
Creating nvvidconv1
Creating filter1
Creating nvosd
Linking elements in the Pipeline
----------pipeline set_state(Gst.State.PAUSED) ----------
process_param: {'info': {'video': [{'id': 2, 'task_id': 2, 'stream': '', 'content': '/home/runone/program/folder/video_records/foshan.mp4', 'nvr_record_url': None, 'streamFlag': 0}]}, 'status': 1, 'stop': 0, 'task_id': 2}
add info: {'id': 2, 'task_id': 2, 'stream': '', 'content': '/home/runone/program/folder/video_records/foshan.mp4', 'nvr_record_url': None, 'streamFlag': 0}
process_info /home/runone/program/folder/video_records/foshan.mp4 {'id': 2, 'task_id': 2, 'stream': '', 'content': '/home/runone/program/folder/video_records/foshan.mp4', 'nvr_record_url': None, 'streamFlag': 0}
0:00:00.335387633 18778 0x6524e68b1530 WARN nvinfer gstnvinfer.cpp:679:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Warning from NvDsInferContextImpl::initialize() <nvdsinfer_context_impl.cpp:1243> [UID = 1]: Warning, OpenCV has been deprecated. Using NMS for clustering instead of cv::groupRectangles with topK = 20 and NMS Threshold = 0.5
socket thread start 0.0.0.0 8848
0.0.0.0:8848 is unused
!!!!analysis init sucess vith vision:3.1.10
WARNING: [TRT]: TensorRT was linked against cuDNN 8.9.0 but loaded cuDNN 8.7.0
WARNING: [TRT]: TensorRT was linked against cuDNN 8.9.0 but loaded cuDNN 8.7.0
0:00:07.318628548 18778 0x6524e68b1530 INFO nvinfer gstnvinfer.cpp:682:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:2092> [UID = 1]: deserialized trt engine from :/home/runone/program/folder/model/yolov8s_exp85_736_11.engine
INFO: ../nvdsinfer/nvdsinfer_model_builder.cpp:610 [FullDims Engine Info]: layers num: 5
0 INPUT kFLOAT images 3x736x736 min: 1x3x736x736 opt: 8x3x736x736 Max: 16x3x736x736
1 OUTPUT kINT32 num_dets 1 min: 0 opt: 0 Max: 0
2 OUTPUT kFLOAT bboxes 100x4 min: 0 opt: 0 Max: 0
3 OUTPUT kFLOAT scores 100 min: 0 opt: 0 Max: 0
4 OUTPUT kINT32 labels 100 min: 0 opt: 0 Max: 0
0:00:07.461398333 18778 0x6524e68b1530 INFO nvinfer gstnvinfer.cpp:682:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2195> [UID = 1]: Use deserialized engine model: /home/runone/program/folder/model/yolov8s_exp85_736_11.engine
0:00:07.470258694 18778 0x6524e68b1530 INFO nvinfer gstnvinfer_impl.cpp:328:notifyLoadModelStatus:<primary-inference> [UID 1]: Load new model:/home/runone/deepstream-implatform/deepstream-common/config_infer_primary_yoloV8.txt sucessfully
Decodebin child added: source
Decodebin child added: typefindelement0
Starting pipeline
Decodebin child added: decodebin0
Decodebin child added: queue2-0
Decodebin child added: flvdemux0
Decodebin child added: multiqueue0
Decodebin child added: h264parse0
Decodebin child added: capsfilter0
Decodebin child added: nvv4l2decoder0
```
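Since the log shows nvv4l2decoder0 being added but "pad-added" never firing afterwards, one idea is to also hook the source bin's "no-more-pads" signal inside _create_uridecode_bin, to see whether autoplugging ever completes. A sketch:

```
def on_no_more_pads(element):
    # Fires once uridecodebin has exposed all of its pads; if this never
    # prints either, autoplugging itself is stalling.
    print("no-more-pads from", element.get_name())

bin.connect("no-more-pads", on_no_more_pads)
```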
I found that when I ran the official demo directly, the same thing happened:
```
/opt/nvidia/deepstream/deepstream-6.4/sources/deepstream_python_apps/apps/deepstream-test1# python3 deepstream_test_1.py /opt/nvidia/deepstream/deepstream-6.4/samples/streams/sample_720p.h264
```
It prints:
```
Creating Pipeline
Creating Source
Creating H264Parser
Creating Decoder
Creating EGLSink
Playing file /opt/nvidia/deepstream/deepstream-6.4/samples/streams/sample_720p.h264
Adding elements to Pipeline
Linking elements in the Pipeline
/opt/nvidia/deepstream/deepstream-6.4/sources/deepstream_python_apps/apps/deepstream-test1/deepstream_test_1.py:220: DeprecationWarning: Gst.Element.get_request_pad is deprecated
sinkpad = streammux.get_request_pad("sink_0")
Starting pipeline
WARNING: ../nvdsinfer/nvdsinfer_model_builder.cpp:1487 Deserialize engine failed because file path: /opt/nvidia/deepstream/deepstream-6.4/sources/deepstream_python_apps/apps/deepstream-test1/../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine open error
0:00:07.509411368 104 0x5f2126b51330 WARN nvinfer gstnvinfer.cpp:679:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Warning from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:2080> [UID = 1]: deserialize engine from file :/opt/nvidia/deepstream/deepstream-6.4/sources/deepstream_python_apps/apps/deepstream-test1/../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine failed
0:00:07.623769306 104 0x5f2126b51330 WARN nvinfer gstnvinfer.cpp:679:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Warning from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2185> [UID = 1]: deserialize backend context from engine from file :/opt/nvidia/deepstream/deepstream-6.4/sources/deepstream_python_apps/apps/deepstream-test1/../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine failed, try rebuild
0:00:07.623796638 104 0x5f2126b51330 INFO nvinfer gstnvinfer.cpp:682:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::buildModel() <nvdsinfer_context_impl.cpp:2106> [UID = 1]: Trying to create engine from model files
WARNING: [TRT]: The implicit batch dimension mode has been deprecated. Please create the network with NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag whenever possible.
WARNING: [TRT]: Missing scale and zero-point for tensor output_bbox/bias, expect fall back to non-int8 implementation for any layer consuming or producing given tensor
WARNING: [TRT]: Missing scale and zero-point for tensor conv1/kernel, expect fall back to non-int8 implementation for any layer consuming or producing given tensor
WARNING: [TRT]: Missing scale and zero-point for tensor conv1/bias, expect fall back to non-int8 implementation for any layer consuming or producing given tensor
=====Repeated warnings=======
WARNING: [TRT]: Missing scale and zero-point for tensor output_bbox/kernel, expect fall back to non-int8 implementation for any layer consuming or producing given tensor
WARNING: [TRT]: Missing scale and zero-point for tensor output_cov/kernel, expect fall back to non-int8 implementation for any layer consuming or producing given tensor
WARNING: [TRT]: Missing scale and zero-point for tensor output_cov/bias, expect fall back to non-int8 implementation for any layer consuming or producing given tensor
0:02:10.918268601 104 0x5f2126b51330 INFO nvinfer gstnvinfer.cpp:682:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::buildModel() <nvdsinfer_context_impl.cpp:2138> [UID = 1]: serialize cuda engine to file: /opt/nvidia/deepstream/deepstream-6.4/samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine successfully
INFO: ../nvdsinfer/nvdsinfer_model_builder.cpp:610 [Implicit Engine Info]: layers num: 3
0 INPUT kFLOAT input_1 3x544x960
1 OUTPUT kFLOAT output_bbox/BiasAdd 16x34x60
2 OUTPUT kFLOAT output_cov/Sigmoid 4x34x60
0:02:11.173705263 104 0x5f2126b51330 INFO nvinfer gstnvinfer_impl.cpp:328:notifyLoadModelStatus:<primary-inference> [UID 1]: Load new model:dstest1_pgie_config.txt sucessfully
```