# Same approach as in the pyds (DeepStream Python bindings) sample apps.
import sys
import gi
import os # Import os module for path manipulation
# Import bus_call from common.bus_call
# Ensure that '/root/deepstream_python_apps/apps/' is in your PYTHONPATH or sys.path
# or that the common module is otherwise discoverable.
sys.path.append('/root/deepstream_python_apps/apps/')
from common.bus_call import bus_call
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, GLib
import pyds # DeepStream Python Bindings
# Define pipeline properties
MUXER_WIDTH = 1280  # nvstreammux output frame width in pixels
MUXER_HEIGHT = 720  # nvstreammux output frame height in pixels
BATCH_SIZE = 1 # Simplest case: 1 stream in batch
# --- Common Paths and Configuration ---
# Directory containing this script; the nvinfer config file is expected beside it.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
# Sample 720p JPEG shipped with the DeepStream SDK, used as the single input.
IMAGE_FILE = '/opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.jpg'
# nvinfer configuration (presumably a YOLOv8 face/head model — confirm against the config file).
CONFIG_FILE_PATH = os.path.join(CURRENT_DIR, "config_infer_primary_yoloV8_F-H.txt")
# --- Main Function ---
def main():
    """Build and run the simplest DeepStream inference pipeline on one image.

    Pipeline topology:
        uridecodebin -> nvvideoconvert -> capsfilter (NVMM/NV12)
            -> nvstreammux -> nvinfer -> fakesink

    Returns:
        int: 0 on normal termination (EOS or Ctrl+C), 1 on setup failure.
    """
    # 1. Standard GStreamer initialization.
    Gst.init(None)

    # 2. Create the (initially empty) pipeline container.
    pipeline = Gst.Pipeline.new("simplest-deepstream-pipeline")
    if not pipeline:
        sys.stderr.write("ERROR: Unable to create pipeline\n")
        return 1

    # 3. Create GStreamer elements.
    # Gst.ElementFactory.make() returns None when the plugin is missing,
    # so every element is checked before use (the original crashed with an
    # AttributeError in that case).

    # Source: uridecodebin - decodes data from a URI (file, RTSP, etc.).
    print(f"Creating uridecodebin for: {IMAGE_FILE}...")
    source = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")

    # Video converter: nvvideoconvert - converts video formats on NVIDIA GPU.
    print("Creating nvvideoconvert...")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "nvvideo-converter")

    # Capsfilter: forces NV12 format in NVMM (NVIDIA device) memory, which
    # DeepStream elements such as nvstreammux require on their sink pads.
    print("Creating capsfilter for NVMM, NV12...")
    caps_nvmm_nv12 = Gst.ElementFactory.make("capsfilter", "caps-nvmm-nv12")

    # Stream muxer: nvstreammux - batches frames from multiple sources.
    print("Creating nvstreammux...")
    streammux = Gst.ElementFactory.make("nvstreammux", "stream-muxer")

    # Primary GIE: nvinfer - runs the deep-learning inference engine.
    print("Creating nvinfer...")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-gie")

    # Sink: fakesink - discards data; useful for testing pipeline flow.
    print("Creating fakesink...")
    sink = Gst.ElementFactory.make("fakesink", "fake-sink")

    elements = {
        "uridecodebin": source,
        "nvvideoconvert": nvvidconv,
        "capsfilter": caps_nvmm_nv12,
        "nvstreammux": streammux,
        "nvinfer": pgie,
        "fakesink": sink,
    }
    missing = [name for name, elem in elements.items() if elem is None]
    if missing:
        sys.stderr.write(
            f"ERROR: Unable to create element(s): {', '.join(missing)}\n")
        return 1

    # 4. Configure element properties.
    source.set_property("uri", f"file://{IMAGE_FILE}")

    caps_string = f"video/x-raw(memory:NVMM), format=NV12, width={MUXER_WIDTH}, height={MUXER_HEIGHT}"
    caps_nvmm_nv12.set_property("caps", Gst.Caps.from_string(caps_string))

    streammux.set_property("width", MUXER_WIDTH)
    streammux.set_property("height", MUXER_HEIGHT)
    streammux.set_property("batch-size", BATCH_SIZE)
    # 0 selects the default CUDA buffer memory type for the muxer's output.
    streammux.set_property("nvbuf-memory-type", 0)

    print(f"Setting nvinfer config-file-path to: {CONFIG_FILE_PATH}")
    pgie.set_property("config-file-path", CONFIG_FILE_PATH)

    sink.set_property("sync", False)  # Don't sync to clock for fakesink.
    sink.set_property("qos", False)   # Disable Quality-of-Service for fakesink.

    # 5. Add all elements to the pipeline before linking.
    print("Adding elements to pipeline...")
    for elem in elements.values():
        pipeline.add(elem)

    # 6. Link elements:
    #    source (dynamic pad) -> nvvidconv -> capsfilter -> streammux.sink_0
    #    streammux -> pgie -> sink

    def cb_newpad(decodebin, decoder_src_pad, data):
        # uridecodebin creates its source pads dynamically once the stream
        # type is known, so the link to nvvideoconvert happens here.
        print("In cb_newpad")
        caps = decoder_src_pad.get_current_caps()
        if caps is None:
            # Pad has no negotiated caps yet; nothing safe to link.
            sys.stderr.write("WARNING: Decodebin pad has no caps. Skipping.\n")
            return
        gstname = caps.get_structure(0).get_name()
        # Only link video pads whose buffers already live in NVMM memory.
        if gstname.find("video") != -1 and caps.get_features(0).contains("memory:NVMM"):
            print("Decoder current pad has NVMM memory features, linking to nvvideoconvert.")
            ret = decoder_src_pad.link(nvvidconv.get_static_pad("sink"))
            if ret != Gst.PadLinkReturn.OK:
                sys.stderr.write(
                    f"ERROR: Failed to link decodebin to nvvideoconvert: {ret}\n")
        else:
            sys.stderr.write(f"WARNING: Decodebin pad {gstname} does not have NVMM video caps or is not video. Skipping.\n")

    source.connect("pad-added", cb_newpad, pipeline)

    # Link nvvideoconvert to capsfilter.
    nvvidconv.link(caps_nvmm_nv12)

    # nvstreammux uses request pads, one per input stream (source_id=0 here).
    # Gst.Element.get_request_pad() is deprecated since GStreamer 1.20;
    # prefer request_pad_simple() when the bindings provide it.
    if hasattr(streammux, "request_pad_simple"):
        sinkpad = streammux.request_pad_simple("sink_0")
    else:
        sinkpad = streammux.get_request_pad("sink_0")
    if sinkpad is None:
        sys.stderr.write("ERROR: Unable to request sink_0 pad from nvstreammux\n")
        return 1
    # Link the capsfilter's src pad to the requested muxer sink pad.
    caps_nvmm_nv12.get_static_pad("src").link(sinkpad)

    # Link muxer -> inference -> sink.
    streammux.link(pgie)
    pgie.link(sink)

    # 7. GLib main loop: drives the pipeline and delivers bus messages.
    loop = GLib.MainLoop()

    # 8. Watch the bus so EOS/error messages (handled by the shared
    # bus_call) can quit the loop.
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # 9. Start the pipeline.
    print("Setting pipeline to PLAYING...\n")
    pipeline.set_state(Gst.State.PLAYING)

    # 10. Block until EOS/error quits the loop, or Ctrl+C interrupts it.
    try:
        loop.run()
    except KeyboardInterrupt:
        # Handle Ctrl+C gracefully.
        pass
    finally:
        # 11. Always release pipeline resources, even on unexpected errors.
        print("Exiting and freeing pipeline resources...")
        pipeline.set_state(Gst.State.NULL)
    return 0
# --- Entry Point ---
# --- Entry Point ---
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status
    # (sys.exit(None) exits with status 0).
    sys.exit(main())
# --- Sample run output (captured from a previous execution) ---
# root@Nitro-AN515-56:~/DS_test# python3 infer_img_test.py
# Creating uridecodebin for: /opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.jpg...
# Creating nvvideoconvert...
# Creating capsfilter for NVMM, NV12...
# Creating nvstreammux...
# Creating nvinfer...
# Setting nvinfer config-file-path to: /root/DS_face_rec/config_infer_primary_yoloV8_F-H.txt
# Creating fakesink...
# Adding elements to pipeline...
# /root/DS_face_rec/infer_img_test.py:124: DeprecationWarning: Gst.Element.get_request_pad is deprecated
#   sinkpad = streammux.get_request_pad(f"sink_{0}")
# Setting pipeline to PLAYING...
# 0:00:00.266536084 886 0x562c191691c0 INFO nvinfer gstnvinfer.cpp:684:gst_nvinfer_logger:<primary-gie> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:2092> [UID = 1]: deserialized trt engine from :/root/DeepStream-Yolo/p-h_model_b1_gpu0_fp16.engine
# Implicit layer support has been deprecated
# INFO: ../nvdsinfer/nvdsinfer_model_builder.cpp:327 [Implicit Engine Info]: layers num: 0
# 0:00:00.266573308 886 0x562c191691c0 INFO nvinfer gstnvinfer.cpp:684:gst_nvinfer_logger:<primary-gie> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2195> [UID = 1]: Use deserialized engine model: /root/DeepStream-Yolo/p-h_model_b1_gpu0_fp16.engine
# 0:00:00.273276063 886 0x562c191691c0 INFO nvinfer gstnvinfer_impl.cpp:343:notifyLoadModelStatus:<primary-gie> [UID 1]: Load new model:/root/DS_face_rec/config_infer_primary_yoloV8_F-H.txt sucessfully
# Using GPU 0 (NVIDIA GeForce GTX 1650, 14 SMs, 1024 th/SM max, CC 7.5, ECC off)
# In cb_newpad
# Decoder current pad has NVMM memory features, linking to nvvideoconvert.
# nvstreammux: Successfully handled EOS for source_id=0
# End-of-stream
# Exiting and freeing pipeline resources...