How to save the output to a file for multiple input sources

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU): NVIDIA Jetson Nano
• DeepStream Version: 5.0.1-1
• JetPack Version (valid for Jetson only): 4.4.1 [L4T 32.4.4]
• TensorRT Version: 7.1.3-1+cuda10.2

How can I save the output to a file when there are multiple input sources? I am feeding multiple sources, and the output is currently displayed on the screen; I want to save it to a file instead.

This is my main function.

def main():   
    number_sources=len(args)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create GStreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ",i," \n ")
        uri_name=args[i]
        if uri_name.find("rtsp://") == 0 :
            is_live = True
        source_bin=create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname="sink_%u" %i
        sinkpad= streammux.get_request_pad(padname) 
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad=source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    print("Creating tiler \n ")
    tiler=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    if(is_aarch64()):
        print("Creating transform \n ")
        transform=Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    # sink = Gst.ElementFactory.make("fakesink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)


    pgie.set_property('config-file-path', "deepstream_config.txt")

    pgie.set_property('interval', INTERVAL)    

    pgie_batch_size=pgie.get_property("batch-size")
    if(pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size",pgie_batch_size," with number of sources ", number_sources," \n")
        pgie.set_property("batch-size",number_sources)
    
    tiler_rows=int(math.sqrt(number_sources))
    tiler_columns=int(math.ceil((1.0*number_sources)/tiler_rows))
    tiler.set_property("rows",tiler_rows)
    tiler.set_property("columns",tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    sink.set_property("sync", 0)


    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)



    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    #Set properties of tracker
    config = configparser.ConfigParser()
    config.read('configs/deepstream_tracker_config.txt')
    config.sections()

    for key in config['tracker']:
        if key == 'tracker-width' :
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height' :
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id' :
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file' :
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file' :
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process' :
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)

    print('enable-batch-process', tracker.get_property('enable-batch-process'))

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    
    if DEEPSTREAM_SHOW_OUTPUT:
        if is_aarch64():
            pipeline.add(transform)
    
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)    
    
    pgie.link(tracker)
    tracker.link(nvvidconv1)

    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)

    if DEEPSTREAM_SHOW_OUTPUT:
        if is_aarch64():
            nvosd.link(transform)
            transform.link(sink)
        else:
            nvosd.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)

    tiler_sink_pad=tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args[:-1]):
        if (i != 0):
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen for events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)

I have separate code that can save the output for a single input source, but I don't know how to adapt the above function to save the output.

The separate code is
single_in_single_out.py (11.6 KB)

I have tried to adapt my main function according to single_in_single_out.py, but it is giving me an error.

This is the new main function:

def main():   
    number_sources=len(args)
         

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create GStreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ",i," \n ")
        uri_name=args[i]
        if uri_name.find("rtsp://") == 0 :
            is_live = True
        source_bin=create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname="sink_%u" %i
        sinkpad= streammux.get_request_pad(padname) 
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad=source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    
    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    print("Creating tiler \n ")
    tiler=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    if(is_aarch64()):
        print("Creating transform \n ")
        transform=Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")
    

    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")
    
    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))
    
    
    codec = "H264"
    bitrate = 4000000
    # Make the encoder
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        #encoder.set_property('bufapi-version', 1)


    # Since the data format in the input file is an elementary h264 stream,
    # we need an h264 parser
    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")
    
    # Use nvdec_h264 for hardware accelerated decode on GPU
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")


    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)


    pgie.set_property('config-file-path', "deepstream_config.txt")

    pgie.set_property('interval', INTERVAL)
    

    pgie_batch_size=pgie.get_property("batch-size")
    if(pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size",pgie_batch_size," with number of sources ", number_sources," \n")
        pgie.set_property("batch-size",number_sources)
    
    tiler_rows=int(math.sqrt(number_sources))
    tiler_columns=int(math.ceil((1.0*number_sources)/tiler_rows))
    tiler.set_property("rows",tiler_rows)
    tiler.set_property("columns",tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    # sink.set_property("sync", 0)

    print("Creating Sink \n")
    sink = Gst.ElementFactory.make("filesink", "filesink")
    if not sink:
        sys.stderr.write(" Unable to create file sink \n")

    sink.set_property("location", "./out.mp4")
    sink.set_property("sync", 1)
    sink.set_property("async", 0)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)



    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    #Set properties of tracker
    config = configparser.ConfigParser()
    config.read('configs/deepstream_tracker_config.txt')
    config.sections()

    for key in config['tracker']:
        if key == 'tracker-width' :
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height' :
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id' :
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file' :
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file' :
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process' :
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)

    print('enable-batch-process', tracker.get_property('enable-batch-process'))

    print("Adding elements to Pipeline \n")

    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(sink)


    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)

    tracker.link(h264parser)
    h264parser.link(decoder)

    decoder.link(nvvidconv1)
    
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(caps)
    caps.link(encoder)

    encoder.link(sink)



    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)

    tiler_sink_pad=tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if (i != 0):
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen for events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)

This is the error I am getting:

(gst-plugin-scanner:10733): GStreamer-WARNING **: 01:59:10.516: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_inferserver.so': libtritonserver.so: cannot open shared object file: No such file or directory

(gst-plugin-scanner:10733): GStreamer-WARNING **: 01:59:10.523: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_udp.so': librivermax.so.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:10733): GStreamer-WARNING **: 01:59:10.710: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/libgstchromaprint.so': /usr/lib/python3.6/dist-packages/cv2/../../../../lib/aarch64-linux-gnu/libgomp.so.1: cannot allocate memory in static TLS block

(gst-plugin-scanner:10733): GStreamer-WARNING **: 01:59:10.795: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/libgstlibav.so': /usr/lib/python3.6/dist-packages/cv2/../../../../lib/aarch64-linux-gnu/libgomp.so.1: cannot allocate memory in static TLS block
Creating Pipeline 
 
Creating streammux 
 
Creating source_bin  0  
 
Creating source bin
source-bin-00
Creating source_bin  1  
 
Creating source bin
source-bin-01
Creating source_bin  2  
 
Creating source bin
source-bin-02
Creating source_bin  3  
 
Creating source bin
source-bin-03
Creating source_bin  4  
 
Creating source bin
source-bin-04
Creating Pgie 
 
Creating nvvidconv1 
 
Creating filter1 
 
Creating tiler 
 
Creating nvvidconv 
 
Creating nvosd 
 
Creating transform 
 
Creating H264 Encoder
Creating H264Parser 

Creating Decoder 

Unknown or legacy key specified 'enable' for group [property]
Unknown or legacy key specified 'config-width' for group [property]
Unknown or legacy key specified 'config-height' for group [property]
Unknown or legacy key specified 'osd-mode' for group [property]
Unknown or legacy key specified 'display-font-size' for group [property]
Unknown or legacy key specified 'is-classifier' for group [property]
Unknown or legacy key specified 'file-loop' for group [property]
WARNING: Overriding infer-config batch-size 1  with number of sources  5  

Creating Sink 

enable-batch-process True
Adding elements to Pipeline 

Linking elements in the Pipeline 


(python3:10732): GStreamer-WARNING **: 01:59:11.694: Trying to link elements primary-inference and tracker that don't share a common ancestor: tracker hasn't been added to a bin or pipeline, and primary-inference is in pipeline0

(python3:10732): GStreamer-WARNING **: 01:59:11.694: Trying to link elements primary-inference and tracker that don't share a common ancestor: tracker hasn't been added to a bin or pipeline, and primary-inference is in pipeline0
Now playing...
1 :  file:///opt/nvidia/deepstream/deepstream-6.0/samples/streams/sample_720p.h264
2 :  file:///opt/nvidia/deepstream/deepstream-6.0/samples/streams/sample_720p.h264
3 :  file:///opt/nvidia/deepstream/deepstream-6.0/samples/streams/sample_720p.h264
4 :  file:///opt/nvidia/deepstream/deepstream-6.0/samples/streams/sample_720p.h264
Starting pipeline 

Opening in BLOCKING MODE 
Opening in BLOCKING MODE 
Deserialize yoloLayer plugin: yolo
0:00:07.240976612 10732     0x1e9c8440 INFO                 nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1900> [UID = 1]: deserialized trt engine from :/home/aqib/aqib/bus_monitoring_system/nvidia_jetson_nano/deepstream-6.0/model_b5_gpu0_fp16.engine
INFO: [Implicit Engine Info]: layers num: 5
0   INPUT  kFLOAT data            3x416x416       
1   OUTPUT kFLOAT num_detections  1               
2   OUTPUT kFLOAT detection_boxes 2535x4          
3   OUTPUT kFLOAT detection_scores 2535            
4   OUTPUT kFLOAT detection_classes 2535            

0:00:07.241295106 10732     0x1e9c8440 INFO                 nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2004> [UID = 1]: Use deserialized engine model: /home/aqib/aqib/bus_monitoring_system/nvidia_jetson_nano/deepstream-6.0/model_b5_gpu0_fp16.engine
0:00:07.483460453 10732     0x1e9c8440 INFO                 nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus:<primary-inference> [UID 1]: Load new model:deepstream_config.txt sucessfully
Decodebin child added: source 

Decodebin child added: decodebin0 

Decodebin child added: source 

Decodebin child added: decodebin1 

Decodebin child added: source 

Decodebin child added: decodebin2 

Decodebin child added: source 

Decodebin child added: decodebin3 

Decodebin child added: source 

Decodebin child added: decodebin4 

Decodebin child added: h264parse1 
Decodebin child added: h264parse0 


Decodebin child added: capsfilter0 
Decodebin child added: capsfilter1 


Decodebin child added: nvv4l2decoder0 

Seting bufapi_version

Decodebin child added: nvv4l2decoder1 

Seting bufapi_version

Decodebin child added: h264parse2 
Decodebin child added: h264parse3 


Decodebin child added: capsfilter2 
Opening in BLOCKING MODE 

NvMMLiteOpen : Block : BlockType = 261 
NVMEDIA: Reading vendor.tegra.display-size : status: 6 
Decodebin child added: h264parse4 
Opening in BLOCKING MODE 

Decodebin child added: capsfilter3 

Decodebin child added: nvv4l2decoder2 

Seting bufapi_version

NvMMLiteOpen : Block : BlockType = 261 
NvMMLiteBlockCreate : Block : BlockType = 261 
Decodebin child added: capsfilter4 

NVMEDIA: Reading vendor.tegra.display-size : status: 6 
NvMMLiteBlockCreate : Block : BlockType = 261 
Decodebin child added: nvv4l2decoder3 

Opening in BLOCKING MODE 
Seting bufapi_version

NvMMLiteOpen : Block : BlockType = 261 
Decodebin child added: nvv4l2decoder4 

Seting bufapi_version

Opening in BLOCKING MODE 
NVMEDIA: Reading vendor.tegra.display-size : status: 6 
NvMMLiteBlockCreate : Block : BlockType = 261 
Opening in BLOCKING MODE 
NvMMLiteOpen : Block : BlockType = 261 
In cb_newpad

NVMEDIA: Reading vendor.tegra.display-size : status: 6 
NvMMLiteOpen : Block : BlockType = 261 
In cb_newpad

NVMEDIA: Reading vendor.tegra.display-size : status: 6 
NvMMLiteBlockCreate : Block : BlockType = 261 
NvMMLiteBlockCreate : Block : BlockType = 261 
In cb_newpad

In cb_newpad

In cb_newpad

0:00:08.887510428 10732     0x1e9d9590 WARN                 nvinfer gstnvinfer.cpp:2288:gst_nvinfer_output_loop:<primary-inference> error: Internal data stream error.
0:00:08.887614283 10732     0x1e9d9590 WARN                 nvinfer gstnvinfer.cpp:2288:gst_nvinfer_output_loop:<primary-inference> error: streaming stopped, reason not-linked (-1)
Error: gst-stream-error-quark: Internal data stream error. (1): /dvs/git/dirty/git-master_linux/deepstream/sdk/src/gst-plugins/gst-nvinfer/gstnvinfer.cpp(2288): gst_nvinfer_output_loop (): /GstPipeline:pipeline0/GstNvInfer:primary-inference:
streaming stopped, reason not-linked (-1)
Exiting app

I am not very familiar with GStreamer.

You can refer to our demo app, which saves frames in JPEG format, first:
https://github.com/NVIDIA-AI-IOT/deepstream_python_apps/tree/master/apps/deepstream-imagedata-multistream
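
Roughly, that demo attaches a pad probe on the tiler sink pad, pulls each RGBA frame out of the batch, and writes it with OpenCV. A minimal sketch of that probe (not the demo verbatim; it assumes upstream buffers are RGBA in CPU-accessible memory, as the demo configures):

import cv2
import pyds

def tiler_sink_pad_buffer_probe(pad, info, u_data):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        # Map this batch slot as a numpy array (requires RGBA + unified/surface memory)
        n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
        frame_image = cv2.cvtColor(n_frame, cv2.COLOR_RGBA2BGRA)
        cv2.imwrite("stream%d_frame%d.jpg" % (frame_meta.pad_index, frame_meta.frame_num), frame_image)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK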

I am familiar with that example. That example uses OpenCV to save a frame. My issue is related to the pipeline: I am looking to save the output to a file through the pipeline itself.

We don't have a pipeline example for this at present, but you can refer to the GStreamer multifilesink plugin below: https://gstreamer.freedesktop.org/documentation/multifile/multifilesink.html?gi-language=c
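
For instance, a multifilesink writes one file per buffer using a printf-style location pattern; a minimal sketch (assuming a JPEG or similar encoder directly upstream):

    sink = Gst.ElementFactory.make("multifilesink", "multi-file-sink")
    if not sink:
        sys.stderr.write(" Unable to create multifilesink \n")
    # %05d is replaced with a running index, producing one file per buffer
    sink.set_property("location", "frame-%05d.jpg")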

I am trying to save the output to a single file; the inputs are multiple. Can you please check my code (one message above) and tell me where I am going wrong? That would be helpful.

1. We suggest you use uridecodebin instead of filesrc.
2. If you want to use multiple sources, you should create a source_bin.
You can refer to our demo code:
https://github.com/NVIDIA-AI-IOT/deepstream_python_apps/blob/master/apps/deepstream-imagedata-multistream/deepstream_imagedata-multistream.py
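
For reference, the demo's create_source_bin wraps a uridecodebin in a bin and exposes the decoded stream through a ghost pad, roughly like this (abridged; cb_newpad is the demo's pad-added callback, which sets the ghost pad's target once the decoded pad appears):

def create_source_bin(index, uri):
    bin_name = "source-bin-%02d" % index
    nbin = Gst.Bin.new(bin_name)
    # uridecodebin picks suitable demux/decode elements for file:// or rtsp:// URIs
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    uri_decode_bin.set_property("uri", uri)
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    Gst.Bin.add(nbin, uri_decode_bin)
    # the ghost pad gets its target later, in cb_newpad
    nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    return nbin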

Actually, I am using a source_bin. The issue is with saving the output to a file; the inputs are fine. I am not able to save the output to a file.

The main function I shared in the first message runs fine. It shows 5 videos (2 rows, 3 columns) on the screen, but I want to save the output to a file.

I shared another main function (two messages above) in which I tried to store the output in a file, but it gives an error (shown in the same message). There is an issue with the pipeline. I am not familiar with GStreamer, so if you can check the pipeline, that would be helpful. Thanks

From your code, you didn't link the nvinfer plugin correctly. If you are not familiar with GStreamer, we suggest you run our demo first and change the code step by step. For how to save the stream to a file, you can refer to the link below:
https://forums.developer.nvidia.com/t/deepstream-sdk-faq/80236/30
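
The warnings in your log also show that the tracker was never added to the pipeline, so the pgie → tracker link fails and nvinfer's output ends up not-linked (hence the "reason not-linked" error); nvvidconv_postosd is likewise created but never linked in. The pattern in that FAQ post is essentially: after nvdsosd, convert back to I420, encode, and write with a filesink. A rough sketch of such a pipeline tail (element names here are illustrative, not taken from the FAQ verbatim):

    # nvosd -> nvvideoconvert -> capsfilter(I420) -> nvv4l2h264enc -> filesink
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    capfilt = Gst.ElementFactory.make("capsfilter", "enc-caps")
    capfilt.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))
    encoder = Gst.ElementFactory.make("nvv4l2h264enc", "h264-encoder")
    sink = Gst.ElementFactory.make("filesink", "file-sink")
    sink.set_property("location", "./out.h264")

    for elem in (nvvidconv_postosd, capfilt, encoder, sink):
        pipeline.add(elem)

    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(capfilt)
    capfilt.link(encoder)
    encoder.link(sink)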

I have made some progress. The link (DeepStream SDK FAQ - #30 by Amycao) has helped; thanks for that.

Actually, my code is built on deepstream_imagedata-multistream.py. Now I have made the changes according to DeepStream SDK FAQ - #30 by Amycao.

It now runs fine when running with sink type 3 (sink types: 1 = filesink; 2 = fakesink; 3 = display sink). But when running with sink type 1, it creates an output file (out.h264) whose size keeps increasing, yet I am unable to open the file.

I think I am very close to solving the issue. Can you please look at the main function below and check it? Thanks for the help.

def main():   
    number_sources=len(args)
    OUTPUT = 3

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create GStreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")


    # Since the data format in the input file is an elementary h264 stream,
    # we need an h264 parser
    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")

    # Use nvdec_h264 for hardware accelerated decode on GPU
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")


    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ",i," \n ")
        uri_name=args[i]
        if uri_name.find("rtsp://") == 0 :
            is_live = True
        source_bin=create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)

        padname="sink_%u" %i
        sinkpad= streammux.get_request_pad(padname) 
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad=source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")


    print("Creating tiler \n ")
    tiler=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    


    if OUTPUT == 1:

        nvvidconv1 = Gst.ElementFactory.make ("nvvideoconvert", "nvvid-converter1")
        if not nvvidconv1:
            sys.stderr.write("Unable to create nvvidconv1")
        capfilt = Gst.ElementFactory.make ("capsfilter", "nvvideo-caps")
        if not capfilt:
            sys.stderr.write("Unable to create capfilt")
        caps = Gst.Caps.from_string('video/x-raw(memory:NVMM), format=I420')
        capfilt.set_property('caps', caps)
        print("Creating nvv4l2h264enc \n")
        nvh264enc = Gst.ElementFactory.make ("nvv4l2h264enc" ,"nvvideo-h264enc")
        if not nvh264enc:
            sys.stderr.write("Unable to create nvh264enc")
        print("Creating filesink \n")    
        sink = Gst.ElementFactory.make ("filesink", "nvvideo-renderer")
        sink.set_property('location', './out.h264')
        if not sink:
            sys.stderr.write("Unable to create filesink")

    elif OUTPUT == 2:

        print("Creating fakesink \n")
        sink = Gst.ElementFactory.make ("fakesink", "fake-renderer")
        if not sink:
            sys.stderr.write("Unable to create fakesink")

    elif OUTPUT == 3:

        print("Creating EGLSink \n")
        sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
        if not sink:
            sys.stderr.write(" Unable to create egl sink \n")
        if is_aarch64():
            transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
            if not transform:
                sys.stderr.write(" Unable to create egl transform \n")




    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)


    pgie.set_property('config-file-path', "deepstream_config.txt")

    pgie.set_property('interval', INTERVAL)
    

    pgie_batch_size=pgie.get_property("batch-size")
    if(pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size",pgie_batch_size," with number of sources ", number_sources," \n")
        pgie.set_property("batch-size",number_sources)
    
    tiler_rows=int(math.sqrt(number_sources))
    tiler_columns=int(math.ceil((1.0*number_sources)/tiler_rows))
    tiler.set_property("rows",tiler_rows)
    tiler.set_property("columns",tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    sink.set_property("sync", 0)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)


    print("Adding elements to Pipeline \n")
    pipeline.add(h264parser)
    pipeline.add(decoder)    
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    
   
    if OUTPUT == 1:
        pipeline.add(nvvidconv1)
        pipeline.add(capfilt)
        pipeline.add(nvh264enc)
        pipeline.add(sink)
    elif OUTPUT == 2:
        pipeline.add(sink)
    elif OUTPUT == 3:
        pipeline.add(sink)
        if is_aarch64():
            pipeline.add(transform)



    print("Linking elements in the Pipeline \n")
    source_bin.link(h264parser)   
    h264parser.link(decoder)

    streammux.link(pgie)    

    pgie.link(nvvidconv)

    nvvidconv.link(tiler)
    tiler.link(nvosd)


    if OUTPUT == 1:
        nvosd.link(nvvidconv1)
        nvvidconv1.link(capfilt)
        capfilt.link(nvh264enc)
        nvh264enc.link(sink)
    elif OUTPUT == 2:
        nvosd.link(sink)
    elif OUTPUT == 3:
        if is_aarch64():
            nvosd.link(transform)
            transform.link(sink)
        else:
            nvosd.link(sink)



    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)

    tiler_sink_pad=tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args[:-1]):
        if (i != 0):
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen for events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)

That is because it is an elementary H.264 stream; you cannot open it with some video players. You can use a tool such as ffmpeg to convert it to MP4 format, or just play it with ffmpeg.
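
For example, something like ffmpeg -framerate 30 -i out.h264 -c copy out.mp4 should wrap the elementary stream in an MP4 container without re-encoding (a framerate is given because an elementary stream carries no container timing). Alternatively, you can produce a real MP4 directly in the pipeline by muxing before the filesink; a sketch for the OUTPUT == 1 branch above (element names are illustrative):

        parser = Gst.ElementFactory.make("h264parse", "h264-parser-out")
        container = Gst.ElementFactory.make("qtmux", "qtmux")
        pipeline.add(parser)
        pipeline.add(container)
        # nvh264enc -> h264parse -> qtmux -> filesink
        nvh264enc.link(parser)
        parser.link(container)
        container.link(sink)

Note that qtmux only finalizes the file on EOS, so shut the pipeline down by sending an EOS event rather than jumping straight to the NULL state.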

It is working great now. I just changed the extension of the output file, and it is now saving as .mp4.

sink.set_property('location', './out.mp4')

Thank you very much for the help.
