File output for Python example deepstream_nvdsanalytics not working

I’m trying to modify the DeepStream Python example deepstream_nvdsanalytics (for DS version 6.0) to write the output to a file. When I run it, I get the following error after the 4th frame:

[...]
##################################################
Object 2 roi status: ['RF']
Objs in ROI: {'RF': 1}
Linecrossing Cumulative: {'Exit': 0}
Linecrossing Current Frame: {'Exit': 0}
Frame Number= 4 stream id= 0 Number of Objects= 8 Vehicle_count= 5 Person_count= 3
##################################################
Error: gst-stream-error-quark: Internal data stream error. (1): gstqueue.c(988): gst_queue_handle_sink_event (): /GstPipeline:pipeline0/GstQueue:queue3:
streaming stopped, reason not-linked (-1)
Exiting app

[NvMultiObjectTracker] De-initialized

This is the modified version of the deepstream_nvdsanalytics.py file. I commented out things that I don’t need anymore (like EGL sink creation) and tried to add the necessary pipeline elements.

#!/usr/bin/env python3

################################################################################
# SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

import argparse
import sys
sys.path.append('../')
import gi
import configparser
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from gi.repository import GLib
from ctypes import *
import time
import sys
import math
import platform
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.FPS import GETFPS

import pyds

fps_streams={}

MAX_DISPLAY_LEN=64
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3
MUXER_OUTPUT_WIDTH=1920
MUXER_OUTPUT_HEIGHT=1080
MUXER_BATCH_TIMEOUT_USEC=4000000
TILED_OUTPUT_WIDTH=1280
TILED_OUTPUT_HEIGHT=720
GST_CAPS_FEATURES_NVMM="memory:NVMM"
OSD_PROCESS_MODE= 0
OSD_DISPLAY_TEXT= 1
pgie_classes_str= ["Vehicle", "TwoWheeler", "Person","RoadSign"]

# nvanalytics_src_pad_buffer_probe will extract metadata received on the nvdsanalytics src pad
# and update params for drawing rectangle, object information etc.
def nvanalytics_src_pad_buffer_probe(pad,info,u_data):
    frame_number=0
    num_rects=0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    while l_frame:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number=frame_meta.frame_num
        l_obj=frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        obj_counter = {
        PGIE_CLASS_ID_VEHICLE:0,
        PGIE_CLASS_ID_PERSON:0,
        PGIE_CLASS_ID_BICYCLE:0,
        PGIE_CLASS_ID_ROADSIGN:0
        }
        print("#"*50)
        while l_obj:
            try: 
                # Note that l_obj.data needs a cast to pyds.NvDsObjectMeta
                # The casting is done by pyds.NvDsObjectMeta.cast()
                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            l_user_meta = obj_meta.obj_user_meta_list
            # Extract object level meta data from NvDsAnalyticsObjInfo
            while l_user_meta:
                try:
                    user_meta = pyds.NvDsUserMeta.cast(l_user_meta.data)
                    if user_meta.base_meta.meta_type == pyds.nvds_get_user_meta_type("NVIDIA.DSANALYTICSOBJ.USER_META"):             
                        user_meta_data = pyds.NvDsAnalyticsObjInfo.cast(user_meta.user_meta_data)
                        if user_meta_data.dirStatus: print("Object {0} moving in direction: {1}".format(obj_meta.object_id, user_meta_data.dirStatus))                    
                        if user_meta_data.lcStatus: print("Object {0} line crossing status: {1}".format(obj_meta.object_id, user_meta_data.lcStatus))
                        if user_meta_data.ocStatus: print("Object {0} overcrowding status: {1}".format(obj_meta.object_id, user_meta_data.ocStatus))
                        if user_meta_data.roiStatus: print("Object {0} roi status: {1}".format(obj_meta.object_id, user_meta_data.roiStatus))
                except StopIteration:
                    break

                try:
                    l_user_meta = l_user_meta.next
                except StopIteration:
                    break
            try: 
                l_obj=l_obj.next
            except StopIteration:
                break
    
        # Get meta data from NvDsAnalyticsFrameMeta
        l_user = frame_meta.frame_user_meta_list
        while l_user:
            try:
                user_meta = pyds.NvDsUserMeta.cast(l_user.data)
                if user_meta.base_meta.meta_type == pyds.nvds_get_user_meta_type("NVIDIA.DSANALYTICSFRAME.USER_META"):
                    user_meta_data = pyds.NvDsAnalyticsFrameMeta.cast(user_meta.user_meta_data)
                    if user_meta_data.objInROIcnt: print("Objs in ROI: {0}".format(user_meta_data.objInROIcnt))                    
                    if user_meta_data.objLCCumCnt: print("Linecrossing Cumulative: {0}".format(user_meta_data.objLCCumCnt))
                    if user_meta_data.objLCCurrCnt: print("Linecrossing Current Frame: {0}".format(user_meta_data.objLCCurrCnt))
                    if user_meta_data.ocStatus: print("Overcrowding status: {0}".format(user_meta_data.ocStatus))
            except StopIteration:
                break
            try:
                l_user = l_user.next
            except StopIteration:
                break
        
        print("Frame Number=", frame_number, "stream id=", frame_meta.pad_index, "Number of Objects=",num_rects,"Vehicle_count=",obj_counter[PGIE_CLASS_ID_VEHICLE],"Person_count=",obj_counter[PGIE_CLASS_ID_PERSON])
        # Get frame rate through this probe
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame=l_frame.next
        except StopIteration:
            break
        print("#"*50)

    return Gst.PadProbeReturn.OK



def cb_newpad(decodebin, decoder_src_pad,data):
    print("In cb_newpad\n")
    caps=decoder_src_pad.get_current_caps()
    gststruct=caps.get_structure(0)
    gstname=gststruct.get_name()
    source_bin=data
    features=caps.get_features(0)

    # Need to check if the pad created by the decodebin is for video and not
    # audio.
    print("gstname=",gstname)
    if(gstname.find("video")!=-1):
        # Link the decodebin pad only if decodebin has picked nvidia
        # decoder plugin nvdec_*. We do this by checking if the pad caps contain
        # NVMM memory features.
        print("features=",features)
        if features.contains("memory:NVMM"):
            # Get the source bin ghost pad
            bin_ghost_pad=source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")

def decodebin_child_added(child_proxy,Object,name,user_data):
    print("Decodebin child added:", name, "\n")
    if(name.find("decodebin") != -1):
        Object.connect("child-added",decodebin_child_added,user_data)

def create_source_bin(index,uri):
    print("Creating source bin")

    # Create a source GstBin to abstract this bin's content from the rest of the
    # pipeline
    bin_name="source-bin-%02d" %index
    print(bin_name)
    nbin=Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")

    # Source element for reading from the uri.
    # We will use decodebin and let it figure out the container format of the
    # stream and the codec and plug the appropriate demux and decode plugins.
    uri_decode_bin=Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    # We set the input uri to the source element
    uri_decode_bin.set_property("uri",uri)
    # Connect to the "pad-added" signal of the decodebin which generates a
    # callback once a new pad for raw data has been created by the decodebin
    uri_decode_bin.connect("pad-added",cb_newpad,nbin)
    uri_decode_bin.connect("child-added",decodebin_child_added,nbin)

    # We need to create a ghost pad for the source bin which will act as a proxy
    # for the video decoder src pad. The ghost pad will not have a target right
    # now. Once the decode bin creates the video decoder and generates the
    # cb_newpad callback, we will set the ghost pad target to the video decoder
    # src pad.
    Gst.Bin.add(nbin,uri_decode_bin)
    bin_pad=nbin.add_pad(Gst.GhostPad.new_no_target("src",Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin

def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    for i in range(0,len(args)-1):
        fps_streams["stream{0}".format(i)]=GETFPS(i)
    number_sources=len(args)-1

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ",i," \n ")
        uri_name=args[i+1]
        if uri_name.find("rtsp://") == 0 :
            is_live = True
        source_bin=create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname="sink_%u" %i
        sinkpad= streammux.get_request_pad(padname) 
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad=source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    queue1=Gst.ElementFactory.make("queue","queue1")
    queue2=Gst.ElementFactory.make("queue","queue2")
    queue3=Gst.ElementFactory.make("queue","queue3")
    queue4=Gst.ElementFactory.make("queue","queue4")
    queue5=Gst.ElementFactory.make("queue","queue5")
    queue6=Gst.ElementFactory.make("queue","queue6")
    queue7=Gst.ElementFactory.make("queue","queue7")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)
    pipeline.add(queue7)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    print("Creating nvtracker \n ")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    print("Creating nvdsanalytics \n ")
    nvanalytics = Gst.ElementFactory.make("nvdsanalytics", "analytics")
    if not nvanalytics:
        sys.stderr.write(" Unable to create nvanalytics \n")
    nvanalytics.set_property("config-file", "config_nvdsanalytics.txt")

    print("Creating tiler \n ")
    tiler=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvosd.set_property('process-mode',OSD_PROCESS_MODE)
    nvosd.set_property('display-text',OSD_DISPLAY_TEXT)

    # if(is_aarch64()):
    #     print("Creating transform \n ")
    #     transform=Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    #     if not transform:
    #         sys.stderr.write(" Unable to create transform \n")

    # print("Creating EGLSink \n")
    # sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    # if not sink:
    #     sys.stderr.write(" Unable to create egl sink \n")

    # added for mp4-output

    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

    codecparse = Gst.ElementFactory.make("h264parse", "h264_parse")
    if not codecparse:
        sys.stderr.write(" Unable to create codecparse \n")

    mux = Gst.ElementFactory.make("mp4mux", "mux")
    if not mux:
        sys.stderr.write(" Unable to create mux \n")

    sink = Gst.ElementFactory.make("filesink", "filesink")
    if not sink:
        sys.stderr.write(" Unable to create filesink \n")
    sink.set_property('location', output_path)
    sink.set_property("sync", 1)
    sink.set_property("async", 0)
    #################

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dsnvanalytics_pgie_config.txt")
    pgie_batch_size=pgie.get_property("batch-size")
    if(pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size",pgie_batch_size," with number of sources ", number_sources," \n")
        pgie.set_property("batch-size",number_sources)
    tiler_rows=int(math.sqrt(number_sources))
    tiler_columns=int(math.ceil((1.0*number_sources)/tiler_rows))
    tiler.set_property("rows",tiler_rows)
    tiler.set_property("columns",tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("qos",0)

    #Set properties of tracker
    config = configparser.ConfigParser()
    config.read('dsnvanalytics_tracker_config.txt')
    config.sections()

    for key in config['tracker']:
        if key == 'tracker-width' :
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height' :
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id' :
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file' :
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file' :
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process' :
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
        if key == 'enable-past-frame' :
            tracker_enable_past_frame = config.getint('tracker', key)
            tracker.set_property('enable_past_frame', tracker_enable_past_frame)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(nvanalytics)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)

    # if is_aarch64():
    #     pipeline.add(transform)

    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(codecparse)
    pipeline.add(mux)
    pipeline.add(sink)

    # We link elements in the following order:
    # sourcebin -> streammux -> nvinfer -> nvtracker -> nvdsanalytics ->
    # nvtiler -> nvvideoconvert -> nvdsosd -> capsfilter -> encoder ->
    # h264parse -> mp4mux -> filesink
    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(nvanalytics)
    nvanalytics.link(queue4)
    queue4.link(tiler)
    tiler.link(queue5)
    queue5.link(nvvidconv)
    nvvidconv.link(queue6)
    queue6.link(nvosd)
    nvosd.link(queue7)
    queue7.link(caps)
    caps.link(encoder)
    encoder.link(codecparse)
    codecparse.link(mux)
    mux.link(sink)

#    if is_aarch64():
#        nvosd.link(queue7)
#        queue7.link(transform)
#        transform.link(sink)
#    else:
#        nvosd.link(queue7)
#        queue7.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)
    nvanalytics_src_pad=nvanalytics.get_static_pad("src")
    if not nvanalytics_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        nvanalytics_src_pad.add_probe(Gst.PadProbeType.BUFFER, nvanalytics_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if (i != 0):
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)


if __name__ == '__main__':
    global codec
    codec = "H264"
    global bitrate
    bitrate = 4000000
    global output_path
    output_path = "out.mp4"

    sys.exit(main(sys.argv))

I didn’t modify the pgie or tracker files and used the sample_1080p_h264.mp4 file for input.

I’d really appreciate some help!

• Hardware Platform (Jetson / GPU)
Jetson Nano
• DeepStream Version
DeepStream 6.0
• JetPack Version (valid for Jetson only)
4.6
• TensorRT Version
8.2.1-1

Could you run with GST_DEBUG=3 on the command line to enable some debug logging and attach the log?

Thanks for your reply! This is the log:

python3 deepstream_nvdsanalytics.py file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 
##########
Creating Pipeline 
 
Creating streamux 
 
Creating source_bin  0  
 
Creating source bin
source-bin-00
Creating Pgie 
 
Creating nvtracker 
 
Creating nvdsanalytics 
 
Creating tiler 
 
Creating nvvidconv 
 
Creating nvosd 
 
Creating H264 Encoder
Adding elements to Pipeline 

Linking elements in the Pipeline 

Now playing...
1 :  file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4
Starting pipeline 

Opening in BLOCKING MODE 
0:00:00.259775283 22006     0x189e0730 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x189a4710 Failed to determine interlace mode
0:00:00.259860441 22006     0x189e0730 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x189a4710 Failed to determine interlace mode
0:00:00.259901379 22006     0x189e0730 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x189a4710 Failed to determine interlace mode
0:00:00.259945911 22006     0x189e0730 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x189a4710 Failed to determine interlace mode
0:00:00.260040028 22006     0x189e0730 WARN                    v4l2 gstv4l2object.c:4476:gst_v4l2_object_probe_caps:<encoder:src> Failed to probe pixel aspect ratio with VIDIOC_CROPCAP: Unknown error -1
gstnvtracker: Loading low-level lib at /opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
gstnvtracker: Batch processing is ON
gstnvtracker: Past frame output is OFF
[NvMultiObjectTracker] Initialized
0:00:00.373341505 22006     0x189e0730 WARN                 nvinfer gstnvinfer.cpp:635:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Warning from NvDsInferContextImpl::initialize() <nvdsinfer_context_impl.cpp:1161> [UID = 1]: Warning, OpenCV has been deprecated. Using NMS for clustering instead of cv::groupRectangles with topK = 20 and NMS Threshold = 0.5
0:00:06.053259942 22006     0x189e0730 INFO                 nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1900> [UID = 1]: deserialized trt engine from :/opt/nvidia/deepstream/deepstream-6.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp16.engine
INFO: [Implicit Engine Info]: layers num: 3
0   INPUT  kFLOAT input_1         3x368x640       
1   OUTPUT kFLOAT conv2d_bbox     16x23x40        
2   OUTPUT kFLOAT conv2d_cov/Sigmoid 4x23x40         

0:00:06.054495227 22006     0x189e0730 INFO                 nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2004> [UID = 1]: Use deserialized engine model: /opt/nvidia/deepstream/deepstream-6.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp16.engine
0:00:06.063282381 22006     0x189e0730 INFO                 nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus:<primary-inference> [UID 1]: Load new model:dsnvanalytics_pgie_config.txt sucessfully
0:00:06.065130126 22006     0x189e0730 WARN                 basesrc gstbasesrc.c:3583:gst_base_src_start_complete:<source> pad not activated yet
Decodebin child added: source 

Decodebin child added: decodebin0 

0:00:06.067898827 22006     0x189e0730 WARN                 basesrc gstbasesrc.c:3583:gst_base_src_start_complete:<source> pad not activated yet
Decodebin child added: qtdemux0 

0:00:06.083307937 22006   0x7efc063de0 WARN                 qtdemux qtdemux.c:3031:qtdemux_parse_trex:<qtdemux0> failed to find fragment defaults for stream 1
0:00:06.083580338 22006   0x7efc063de0 WARN                 qtdemux qtdemux.c:3031:qtdemux_parse_trex:<qtdemux0> failed to find fragment defaults for stream 2
Decodebin child added: multiqueue0 

Decodebin child added: h264parse0 

Decodebin child added: capsfilter0 

Decodebin child added: aacparse0 

Decodebin child added: avdec_aac0 

Decodebin child added: nvv4l2decoder0 

Opening in BLOCKING MODE 
0:00:06.286600219 22006   0x7f00014400 WARN                    v4l2 gstv4l2object.c:4476:gst_v4l2_object_probe_caps:<nvv4l2decoder0:src> Failed to probe pixel aspect ratio with VIDIOC_CROPCAP: Unknown error -1
0:00:06.286661834 22006   0x7f00014400 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x7ef401eec0 Failed to determine interlace mode
0:00:06.286711262 22006   0x7f00014400 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x7ef401eec0 Failed to determine interlace mode
0:00:06.286759545 22006   0x7f00014400 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x7ef401eec0 Failed to determine interlace mode
NvMMLiteOpen : Block : BlockType = 261 
NVMEDIA: Reading vendor.tegra.display-size : status: 6 
NvMMLiteBlockCreate : Block : BlockType = 261 
0:00:06.391590228 22006   0x7f00014400 WARN                    v4l2 gstv4l2object.c:4476:gst_v4l2_object_probe_caps:<nvv4l2decoder0:src> Failed to probe pixel aspect ratio with VIDIOC_CROPCAP: Unknown error -1
0:00:06.391853723 22006   0x7f00014400 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x7ef401eec0 Failed to determine interlace mode
0:00:06.392099301 22006   0x7f00014400 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x7ef401eec0 Failed to determine interlace mode
0:00:06.392339774 22006   0x7f00014400 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x7ef401eec0 Failed to determine interlace mode
In cb_newpad

gstname= video/x-raw
features= <Gst.CapsFeatures object at 0x7f8c401168 (GstCapsFeatures at 0x7ef402a860)>
In cb_newpad

gstname= audio/x-raw
0:00:06.402292837 22006   0x7f00014400 WARN            v4l2videodec gstv4l2videodec.c:1755:gst_v4l2_video_dec_decide_allocation:<nvv4l2decoder0> Duration invalid, not setting latency
0:00:06.403093166 22006   0x7f00014400 WARN          v4l2bufferpool gstv4l2bufferpool.c:1087:gst_v4l2_buffer_pool_start:<nvv4l2decoder0:pool:src> Uncertain or not enough buffers, enabling copy threshold
0:00:06.437285618 22006   0x7ef4015ed0 WARN          v4l2bufferpool gstv4l2bufferpool.c:1536:gst_v4l2_buffer_pool_dqbuf:<nvv4l2decoder0:pool:src> Driver should never set v4l2_buffer.field to ANY
##################################################
Objs in ROI: {'RF': 0}
Linecrossing Cumulative: {'Exit': 0}
Linecrossing Current Frame: {'Exit': 0}
Frame Number= 0 stream id= 0 Number of Objects= 0 Vehicle_count= 0 Person_count= 0
##################################################
##################################################
Objs in ROI: {'RF': 0}
Linecrossing Cumulative: {'Exit': 0}
Linecrossing Current Frame: {'Exit': 0}
Frame Number= 1 stream id= 0 Number of Objects= 0 Vehicle_count= 0 Person_count= 0
##################################################
##################################################
Objs in ROI: {'RF': 0}
Linecrossing Cumulative: {'Exit': 0}
Linecrossing Current Frame: {'Exit': 0}
Frame Number= 2 stream id= 0 Number of Objects= 0 Vehicle_count= 0 Person_count= 0
##################################################
##################################################
Object 2 roi status: ['RF']
Objs in ROI: {'RF': 1}
Linecrossing Cumulative: {'Exit': 0}
Linecrossing Current Frame: {'Exit': 0}
Frame Number= 3 stream id= 0 Number of Objects= 8 Vehicle_count= 5 Person_count= 3
##################################################
##################################################
Object 2 roi status: ['RF']
Objs in ROI: {'RF': 1}
Linecrossing Cumulative: {'Exit': 0}
Linecrossing Current Frame: {'Exit': 0}
Frame Number= 4 stream id= 0 Number of Objects= 8 Vehicle_count= 5 Person_count= 3
##################################################
0:00:57.790118155 22006     0x181db050 WARN                   queue gstqueue.c:988:gst_queue_handle_sink_event:<queue3> error: Internal data stream error.
0:00:57.790235761 22006     0x181db050 WARN                   queue gstqueue.c:988:gst_queue_handle_sink_event:<queue3> error: streaming stopped, reason not-linked (-1)
Error: gst-stream-error-quark: Internal data stream error. (1): gstqueue.c(988): gst_queue_handle_sink_event (): /GstPipeline:pipeline0/GstQueue:queue3:
streaming stopped, reason not-linked (-1)
Exiting app

[NvMultiObjectTracker] De-initialized

You didn’t enable the GStreamer log by setting the GST_DEBUG parameter. You can run the command below:

GST_DEBUG=3 python3 deepstream_nvdsanalytics.py file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 

For my previous reply I had already executed export GST_DEBUG=3 in the shell before running the program.
I ran your command GST_DEBUG=3 python3 deepstream_nvdsanalytics.py file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 and unfortunately the output is the same.

chris@jetson-nano:/opt/nvidia/deepstream/deepstream/sources/deepstream_python_apps/apps/deepstream-nvdsanalytics$ GST_DEBUG=3 python3 deepstream_nvdsanalytics.py file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 
Creating Pipeline 
 
Creating streamux 
 
Creating source_bin  0  
 
Creating source bin
source-bin-00
Creating Pgie 
 
Creating nvtracker 
 
Creating nvdsanalytics 
 
Creating tiler 
 
Creating nvvidconv 
 
Creating nvosd 
 
Creating H264 Encoder
Adding elements to Pipeline 

Linking elements in the Pipeline 

Now playing...
1 :  file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4
Starting pipeline 

Opening in BLOCKING MODE 
0:00:00.269062300 16448     0x2dbe8730 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x2dbac710 Failed to determine interlace mode
0:00:00.269144801 16448     0x2dbe8730 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x2dbac710 Failed to determine interlace mode
0:00:00.269186417 16448     0x2dbe8730 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x2dbac710 Failed to determine interlace mode
0:00:00.269231938 16448     0x2dbe8730 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x2dbac710 Failed to determine interlace mode
0:00:00.269327617 16448     0x2dbe8730 WARN                    v4l2 gstv4l2object.c:4476:gst_v4l2_object_probe_caps:<encoder:src> Failed to probe pixel aspect ratio with VIDIOC_CROPCAP: Unknown error -1
gstnvtracker: Loading low-level lib at /opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
gstnvtracker: Batch processing is ON
gstnvtracker: Past frame output is OFF
[NvMultiObjectTracker] Initialized
0:00:00.382073484 16448     0x2dbe8730 WARN                 nvinfer gstnvinfer.cpp:635:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Warning from NvDsInferContextImpl::initialize() <nvdsinfer_context_impl.cpp:1161> [UID = 1]: Warning, OpenCV has been deprecated. Using NMS for clustering instead of cv::groupRectangles with topK = 20 and NMS Threshold = 0.5
0:00:06.279556120 16448     0x2dbe8730 INFO                 nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1900> [UID = 1]: deserialized trt engine from :/opt/nvidia/deepstream/deepstream-6.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp16.engine
INFO: [Implicit Engine Info]: layers num: 3
0   INPUT  kFLOAT input_1         3x368x640       
1   OUTPUT kFLOAT conv2d_bbox     16x23x40        
2   OUTPUT kFLOAT conv2d_cov/Sigmoid 4x23x40         

0:00:06.280753019 16448     0x2dbe8730 INFO                 nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2004> [UID = 1]: Use deserialized engine model: /opt/nvidia/deepstream/deepstream-6.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp16.engine
0:00:06.290313628 16448     0x2dbe8730 INFO                 nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus:<primary-inference> [UID 1]: Load new model:dsnvanalytics_pgie_config.txt sucessfully
0:00:06.291961786 16448     0x2dbe8730 WARN                 basesrc gstbasesrc.c:3583:gst_base_src_start_complete:<source> pad not activated yet
Decodebin child added: source 

Decodebin child added: decodebin0 

0:00:06.294435170 16448     0x2dbe8730 WARN                 basesrc gstbasesrc.c:3583:gst_base_src_start_complete:<source> pad not activated yet
Decodebin child added: qtdemux0 

0:00:06.310030119 16448   0x7f0c0641e0 WARN                 qtdemux qtdemux.c:3031:qtdemux_parse_trex:<qtdemux0> failed to find fragment defaults for stream 1
0:00:06.310251738 16448   0x7f0c0641e0 WARN                 qtdemux qtdemux.c:3031:qtdemux_parse_trex:<qtdemux0> failed to find fragment defaults for stream 2
Decodebin child added: multiqueue0 

Decodebin child added: h264parse0 

Decodebin child added: capsfilter0 

Decodebin child added: aacparse0 

Decodebin child added: avdec_aac0 

Decodebin child added: nvv4l2decoder0 

Opening in BLOCKING MODE 
0:00:06.512747070 16448   0x7f10014400 WARN                    v4l2 gstv4l2object.c:4476:gst_v4l2_object_probe_caps:<nvv4l2decoder0:src> Failed to probe pixel aspect ratio with VIDIOC_CROPCAP: Unknown error -1
0:00:06.512840093 16448   0x7f10014400 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x7f0401eec0 Failed to determine interlace mode
0:00:06.512905459 16448   0x7f10014400 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x7f0401eec0 Failed to determine interlace mode
0:00:06.512969366 16448   0x7f10014400 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x7f0401eec0 Failed to determine interlace mode
NvMMLiteOpen : Block : BlockType = 261 
NVMEDIA: Reading vendor.tegra.display-size : status: 6 
NvMMLiteBlockCreate : Block : BlockType = 261 
0:00:06.618411179 16448   0x7f10014400 WARN                    v4l2 gstv4l2object.c:4476:gst_v4l2_object_probe_caps:<nvv4l2decoder0:src> Failed to probe pixel aspect ratio with VIDIOC_CROPCAP: Unknown error -1
0:00:06.618599829 16448   0x7f10014400 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x7f0401eec0 Failed to determine interlace mode
0:00:06.618781499 16448   0x7f10014400 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x7f0401eec0 Failed to determine interlace mode
0:00:06.618965982 16448   0x7f10014400 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x7f0401eec0 Failed to determine interlace mode
In cb_newpad

gstname= video/x-raw
features= <Gst.CapsFeatures object at 0x7f9cef3168 (GstCapsFeatures at 0x7f0402a860)>
In cb_newpad

gstname= audio/x-raw
0:00:06.632728291 16448   0x7f10014400 WARN            v4l2videodec gstv4l2videodec.c:1755:gst_v4l2_video_dec_decide_allocation:<nvv4l2decoder0> Duration invalid, not setting latency
0:00:06.633094079 16448   0x7f10014400 WARN          v4l2bufferpool gstv4l2bufferpool.c:1087:gst_v4l2_buffer_pool_start:<nvv4l2decoder0:pool:src> Uncertain or not enough buffers, enabling copy threshold
0:00:06.658445528 16448   0x7f04015ed0 WARN          v4l2bufferpool gstv4l2bufferpool.c:1536:gst_v4l2_buffer_pool_dqbuf:<nvv4l2decoder0:pool:src> Driver should never set v4l2_buffer.field to ANY
##################################################
Objs in ROI: {'RF': 0}
Linecrossing Cumulative: {'Exit': 0}
Linecrossing Current Frame: {'Exit': 0}
Frame Number= 0 stream id= 0 Number of Objects= 0 Vehicle_count= 0 Person_count= 0
##################################################
##################################################
Objs in ROI: {'RF': 0}
Linecrossing Cumulative: {'Exit': 0}
Linecrossing Current Frame: {'Exit': 0}
Frame Number= 1 stream id= 0 Number of Objects= 0 Vehicle_count= 0 Person_count= 0
##################################################
##################################################
Objs in ROI: {'RF': 0}
Linecrossing Cumulative: {'Exit': 0}
Linecrossing Current Frame: {'Exit': 0}
Frame Number= 2 stream id= 0 Number of Objects= 0 Vehicle_count= 0 Person_count= 0
##################################################
##################################################
Object 2 roi status: ['RF']
Objs in ROI: {'RF': 1}
Linecrossing Cumulative: {'Exit': 0}
Linecrossing Current Frame: {'Exit': 0}
Frame Number= 3 stream id= 0 Number of Objects= 8 Vehicle_count= 5 Person_count= 3
##################################################
##################################################
Object 2 roi status: ['RF']
Objs in ROI: {'RF': 1}
Linecrossing Cumulative: {'Exit': 0}
Linecrossing Current Frame: {'Exit': 0}
Frame Number= 4 stream id= 0 Number of Objects= 8 Vehicle_count= 5 Person_count= 3
##################################################
0:00:57.959206361 16448     0x2d3e3050 WARN                   queue gstqueue.c:988:gst_queue_handle_sink_event:<queue3> error: Internal data stream error.
0:00:57.959363031 16448     0x2d3e3050 WARN                   queue gstqueue.c:988:gst_queue_handle_sink_event:<queue3> error: streaming stopped, reason not-linked (-1)
Error: gst-stream-error-quark: Internal data stream error. (1): gstqueue.c(988): gst_queue_handle_sink_event (): /GstPipeline:pipeline0/GstQueue:queue3:
streaming stopped, reason not-linked (-1)
Exiting app

[NvMultiObjectTracker] De-initialized

I don’t know if this is helpful, but I also ran the program with GST_DEBUG=4 and this is the output (I put it in a file since there is so much of it).

log.txt (402.3 KB)
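For reference, GStreamer writes its GST_DEBUG output to stderr, so a log of this size can be captured with a plain shell redirect, for example:

GST_DEBUG=4 python3 deepstream_nvdsanalytics.py <uri> 2> log.txt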

Maybe this part of the file is relevant, but I don’t know yet what it means.

0:00:06.695940162 16912     0x21e4a990 INFO               GST_EVENT gstevent.c:814:gst_event_new_caps: creating caps event video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, framerate=(fraction)30/1, batch-size=(int)1, num-surfaces-per-frame=(int)1, format=(string)RGBA, block-linear=(boolean)false
0:00:06.696045737 16912     0x21e4a990 INFO                GST_PADS gstpad.c:4232:gst_pad_peer_query:<queue7:src> pad has no peer
0:00:06.696198032 16912     0x21e4ab70 INFO                GST_PADS gstpad.c:4232:gst_pad_peer_query:<queue7:src> pad has no peer

Your pipeline has some problem. You can try to set your sink plugin to fakesink.
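For example, a minimal sketch of that swap, replacing only the filesink creation (the location property is then not needed):

sink = Gst.ElementFactory.make("fakesink", "fakesink")
if not sink:
    sys.stderr.write(" Unable to create fakesink \n")
sink.set_property("sync", 0)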

Unfortunately, setting the sink plugin to fakesink did not resolve the issue and didn’t provide any additional information.

However, I finally figured out what the issue was. It is apparently necessary to add a second nvvideoconvert element to the pipeline (named convertor_postosd here), linked between the nvosd element and the capsfilter. Presumably the RGBA output of nvdsosd (visible in the caps event of the GST_DEBUG=4 log above) cannot be fed directly into the I420 capsfilter and encoder, so the extra converter handles that conversion.

So I had to create:

nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
if not nvvidconv_postosd:
    sys.stderr.write(" Unable to create nvvidconv_postosd \n")

Add it to the pipeline:

pipeline.add(nvvidconv_postosd)

And link it after the nvosd element:

[...]
queue6.link(nvosd)
nvosd.link(queue7)
queue7.link(nvvidconv_postosd)
nvvidconv_postosd.link(caps)
[...]

Now it is finally writing the output to a video file.

For anyone in the future with the same issue, this is the code of the deepstream_nvdsanalytics.py file:

#!/usr/bin/env python3

################################################################################
# SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

import argparse
import sys

sys.path.append('../')
import gi
import configparser

gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from gi.repository import GLib
from ctypes import *
import time
import sys
import math
import platform
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.FPS import GETFPS

import pyds

fps_streams = {}

MAX_DISPLAY_LEN = 64
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3
MUXER_OUTPUT_WIDTH = 1920
MUXER_OUTPUT_HEIGHT = 1080
MUXER_BATCH_TIMEOUT_USEC = 4000000
TILED_OUTPUT_WIDTH = 1280
TILED_OUTPUT_HEIGHT = 720
GST_CAPS_FEATURES_NVMM = "memory:NVMM"
OSD_PROCESS_MODE = 0
OSD_DISPLAY_TEXT = 1
pgie_classes_str = ["Vehicle", "TwoWheeler", "Person", "RoadSign"]


# nvanalytics_src_pad_buffer_probe will extract metadata received on the nvdsanalytics src pad
# and update params for drawing rectangle, object information etc.
def nvanalytics_src_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    while l_frame:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }
        print("#" * 50)
        while l_obj:
            try:
                # Note that l_obj.data needs a cast to pyds.NvDsObjectMeta
                # The casting is done by pyds.NvDsObjectMeta.cast()
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            l_user_meta = obj_meta.obj_user_meta_list
            # Extract object level meta data from NvDsAnalyticsObjInfo
            while l_user_meta:
                try:
                    user_meta = pyds.NvDsUserMeta.cast(l_user_meta.data)
                    if user_meta.base_meta.meta_type == pyds.nvds_get_user_meta_type("NVIDIA.DSANALYTICSOBJ.USER_META"):
                        user_meta_data = pyds.NvDsAnalyticsObjInfo.cast(user_meta.user_meta_data)
                        if user_meta_data.dirStatus: print(
                            "Object {0} moving in direction: {1}".format(obj_meta.object_id, user_meta_data.dirStatus))
                        if user_meta_data.lcStatus: print(
                            "Object {0} line crossing status: {1}".format(obj_meta.object_id, user_meta_data.lcStatus))
                        if user_meta_data.ocStatus: print(
                            "Object {0} overcrowding status: {1}".format(obj_meta.object_id, user_meta_data.ocStatus))
                        if user_meta_data.roiStatus: print(
                            "Object {0} roi status: {1}".format(obj_meta.object_id, user_meta_data.roiStatus))
                except StopIteration:
                    break

                try:
                    l_user_meta = l_user_meta.next
                except StopIteration:
                    break
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Get meta data from NvDsAnalyticsFrameMeta
        l_user = frame_meta.frame_user_meta_list
        while l_user:
            try:
                user_meta = pyds.NvDsUserMeta.cast(l_user.data)
                if user_meta.base_meta.meta_type == pyds.nvds_get_user_meta_type("NVIDIA.DSANALYTICSFRAME.USER_META"):
                    user_meta_data = pyds.NvDsAnalyticsFrameMeta.cast(user_meta.user_meta_data)
                    if user_meta_data.objInROIcnt: print("Objs in ROI: {0}".format(user_meta_data.objInROIcnt))
                    if user_meta_data.objLCCumCnt: print(
                        "Linecrossing Cumulative: {0}".format(user_meta_data.objLCCumCnt))
                    if user_meta_data.objLCCurrCnt: print(
                        "Linecrossing Current Frame: {0}".format(user_meta_data.objLCCurrCnt))
                    if user_meta_data.ocStatus: print("Overcrowding status: {0}".format(user_meta_data.ocStatus))
            except StopIteration:
                break
            try:
                l_user = l_user.next
            except StopIteration:
                break

        print("Frame Number=", frame_number, "stream id=", frame_meta.pad_index, "Number of Objects=", num_rects,
              "Vehicle_count=", obj_counter[PGIE_CLASS_ID_VEHICLE], "Person_count=", obj_counter[PGIE_CLASS_ID_PERSON])
        # Get frame rate through this probe
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
        print("#" * 50)

    return Gst.PadProbeReturn.OK


def cb_newpad(decodebin, decoder_src_pad, data):
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)

    # Need to check if the pad created by the decodebin is for video and not
    # audio.
    print("gstname=", gstname)
    if (gstname.find("video") != -1):
        # Link the decodebin pad only if decodebin has picked nvidia
        # decoder plugin nvdec_*. We do this by checking if the pad caps contain
        # NVMM memory features.
        print("features=", features)
        if features.contains("memory:NVMM"):
            # Get the source bin ghost pad
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")


def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if (name.find("decodebin") != -1):
        Object.connect("child-added", decodebin_child_added, user_data)


def create_source_bin(index, uri):
    print("Creating source bin")

    # Create a source GstBin to abstract this bin's content from the rest of the
    # pipeline
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")

    # Source element for reading from the uri.
    # We will use decodebin and let it figure out the container format of the
    # stream and the codec and plug the appropriate demux and decode plugins.
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    # We set the input uri to the source element
    uri_decode_bin.set_property("uri", uri)
    # Connect to the "pad-added" signal of the decodebin which generates a
    # callback once a new pad for raw data has been created by the decodebin
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)

    # We need to create a ghost pad for the source bin which will act as a proxy
    # for the video decoder src pad. The ghost pad will not have a target right
    # now. Once the decode bin creates the video decoder and generates the
    # cb_newpad callback, we will set the ghost pad target to the video decoder
    # src pad.
    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin


def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    queue3 = Gst.ElementFactory.make("queue", "queue3")
    queue4 = Gst.ElementFactory.make("queue", "queue4")
    queue5 = Gst.ElementFactory.make("queue", "queue5")
    queue6 = Gst.ElementFactory.make("queue", "queue6")
    queue7 = Gst.ElementFactory.make("queue", "queue7")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)
    pipeline.add(queue7)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    print("Creating nvtracker \n ")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    print("Creating nvdsanalytics \n ")
    nvanalytics = Gst.ElementFactory.make("nvdsanalytics", "analytics")
    if not nvanalytics:
        sys.stderr.write(" Unable to create nvanalytics \n")
    nvanalytics.set_property("config-file", "config_nvdsanalytics.txt")

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvosd.set_property('process-mode', OSD_PROCESS_MODE)
    nvosd.set_property('display-text', OSD_DISPLAY_TEXT)

    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

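    # NOTE: this hardcodes h264parse; with codec == "H265" you would presumably
    # need "h265parse" here instead (only H264 was tested in this thread).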
    codecparse = Gst.ElementFactory.make("h264parse", "h264_parse")
    if not codecparse:
        sys.stderr.write(" Unable to create codecparse \n")

    mux = Gst.ElementFactory.make("mp4mux", "mux")
    if not mux:
        sys.stderr.write(" Unable to create mux \n")

    sink = Gst.ElementFactory.make("filesink", "filesink")
    # sink = Gst.ElementFactory.make("fakesink", "fakesink")
    if not sink:
        sys.stderr.write(" Unable to create filesink \n")
    sink.set_property('location', output_path)
    sink.set_property("sync", 0)
    sink.set_property("async", 1)
    #################

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "config_pgie.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ",
              number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("qos", 0)

    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read('config_tracker.txt')
    config.sections()

    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
        if key == 'enable-past-frame':
            tracker_enable_past_frame = config.getint('tracker', key)
            tracker.set_property('enable_past_frame', tracker_enable_past_frame)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(nvanalytics)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(codecparse)
    pipeline.add(mux)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(nvanalytics)
    nvanalytics.link(queue4)
    queue4.link(tiler)
    tiler.link(queue5)
    queue5.link(nvvidconv)
    nvvidconv.link(queue6)
    queue6.link(nvosd)
    nvosd.link(queue7)
    queue7.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(codecparse)
    codecparse.link(mux)
    mux.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    nvanalytics_src_pad = nvanalytics.get_static_pad("src")
    if not nvanalytics_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        nvanalytics_src_pad.add_probe(Gst.PadProbeType.BUFFER, nvanalytics_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if (i != 0):
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)


if __name__ == '__main__':
    global codec
    codec = "H264"
    global bitrate
    bitrate = 4000000
    global output_path
    output_path = "out.mp4"

    sys.exit(main(sys.argv))

On the command line you only have to pass the input sources; it will always write to out.mp4. I didn’t modify the config files of the example project.
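If you want the output path, codec or bitrate to be configurable instead of hardcoded, here is a minimal sketch of how the already-imported argparse module could replace the __main__ block (a hypothetical wrapper, not part of the original sample):

def parse_args():
    # Hypothetical CLI wrapper around the hard-coded globals above
    parser = argparse.ArgumentParser(description="deepstream_nvdsanalytics with file output")
    parser.add_argument("uris", nargs="+", help="one or more input URIs")
    parser.add_argument("-c", "--codec", default="H264", choices=["H264", "H265"], help="encoder codec")
    parser.add_argument("-b", "--bitrate", type=int, default=4000000, help="encoder bitrate in bit/s")
    parser.add_argument("-o", "--output", default="out.mp4", help="output file location")
    return parser.parse_args()

if __name__ == '__main__':
    pargs = parse_args()
    # main() reads these module-level globals
    codec = pargs.codec
    bitrate = pargs.bitrate
    output_path = pargs.output
    sys.exit(main([sys.argv[0]] + pargs.uris))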
