Smart Recording on GPU-1 Also Loads Processes on GPU-0

Please provide complete information as applicable to your setup.

• Hardware Platform -----> GPU (2x L4)
• DeepStream Version ------> 7.0
• TensorRT Version ---------> 8.6.2.1
• NVIDIA GPU Driver Version --------> 535.230.02

We have two L4s, and on each GPU we spawn 4 processes, each process handling 20 cameras.
When we trigger the start-sr signal to record video from GPU-1 cameras, the existing GPU-1 processes also get loaded onto GPU-0. However, when we trigger the start-sr signal to record video from GPU-0 cameras, nothing is affected.

Below is a screenshot of how the GPU-1 processes get loaded onto GPU-0 when I trigger the GPU-1 cameras, and we are attaching our code. Please check it and confirm where it is going wrong.

import sys
sys.path.append("../")
# from bus_call import bus_call
import pyds
import math
import time
from ctypes import *
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GLib
import configparser
import datetime
import os
import argparse
import multiprocessing
from threading import Lock
from collections import defaultdict, deque
from typing import Tuple, Dict, Deque
import numpy as np
import traceback, copy
import faulthandler
faulthandler.enable()
import uuid
from os import environ
import ctypes

start_time = time.time()

fps_mutex = Lock()

# User context handed to the custom smart-record bindings: a 4-byte
# session id followed by a 32-byte name buffer.
class SRUserContext(ctypes.Structure):
    _fields_ = [
        ("sessionid", ctypes.c_int),
        ("name", ctypes.c_char * 32)
    ]

# (Other constants and definitions remain unchanged)
MUXER_BATCH_TIMEOUT_USEC = 33000


def bus_call(bus, message, loop):
    t = message.type
    if t == Gst.MessageType.EOS:
        sys.stdout.write("End-of-stream\n")
        loop.quit()
    elif t == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        sys.stderr.write("Warning: %s: %s\n" % (err, debug))
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write("Error: %s: %s\n" % (err, debug))
        loop.quit()
    return True

def cb_newpad(decodebin, decoder_src_pad, data):
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    if not caps:
        caps = decoder_src_pad.query_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)
    print("gstname=", gstname)
    if gstname.find("video") != -1:
        print("features=", features)
        if features.contains("memory:NVMM"):
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write("Error: Decodebin did not pick nvidia decoder plugin.\n")

def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)

def create_source_bin(index, uri, file_loop, gpu_id, list_for_nvurisrcbin):
    print("Creating source bin")
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write("Unable to create source bin \n")
    if file_loop:
        uri_decode_bin = Gst.ElementFactory.make("nvurisrcbin", "uri-decode-bin")
        uri_decode_bin.set_property("file-loop", 1)
        uri_decode_bin.set_property("cudadec-memtype", 0)
    else:
        uri_decode_bin = Gst.ElementFactory.make("nvurisrcbin", "nvurisrcbin")
    if not uri_decode_bin:
        sys.stderr.write("Unable to create uri decode bin \n")
    uri_decode_bin.set_property("uri", uri)
    uri_decode_bin.set_property("source-id", index)

    
    # Smart record properties: mode 2 enables recording via the
    # start-sr/stop-sr signals; clips go to a unique per-source directory
    # backed by a 20-second cache.
    base_path = f"/opt/nvidia/deepstream/deepstream-7.0/nvodin24/video/{index}_{uuid.uuid4().hex[:8]}"
    uri_decode_bin.set_property("smart-record", 2)
    os.makedirs(base_path, exist_ok=True)
    uri_decode_bin.set_property("smart-rec-dir-path", base_path)
    uri_decode_bin.set_property("smart-rec-cache", 20)

    uri_decode_bin.set_property("latency", 1)
    uri_decode_bin.set_property("num-extra-surfaces", 5)
    uri_decode_bin.set_property("gpu-id", gpu_id)
    uri_decode_bin.set_property("rtsp-reconnect-interval", 180)
    uri_decode_bin.connect("sr-done", record_done, nbin)
    list_for_nvurisrcbin.append(uri_decode_bin)
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)
    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write("Failed to add ghost pad in source bin \n")
        return None
    return nbin
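
# The "start-sr" action signal on nvurisrcbin takes four arguments: a
# pointer that receives the new session id, the number of seconds of
# cached video to include before the trigger point, the recording
# duration in seconds, and an opaque user-data pointer that is handed
# back in the "sr-done" callback.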
def start_sr_function(element):
    sessionid = pyds.get_native_ptr(pyds.alloc_buffer(4))
    print(f"sessionid {sessionid}")
    sr_user_context_size = ctypes.sizeof(SRUserContext)
    sr_user_context_buf = pyds.get_native_ptr(pyds.alloc_buffer(sr_user_context_size))
    sr_user_context = pyds.SRUserContext.cast(sr_user_context_buf)
    sr_user_context.sessionid = 42
    sr_user_context.name = "sr-demo " + element.get_name()
    print(f"sr_user_context_buf {sr_user_context_buf} {element.get_name()}")
    element.emit('start-sr', sessionid, 5, 5, sr_user_context_buf)
    pyds.free_gbuffer(sessionid)
    print('******start sr*****')
    return True

def record_done(nvurisrcbin, recordingInfo, user_ctx, user_data):
    print('******sr done*****')
    # hash() on the gpointer capsule recovers the raw address so the
    # custom binding can cast it back to an SRUserContext.
    sr = pyds.SRUserContext.cast(hash(user_ctx))
    print(f"session id {sr.sessionid} -- name {sr.name}")
    pyds.free_buffer(hash(user_ctx))

def pipeline_process(args, name, gpu_id, list_for_nvurisrcbin):
    # args is the list of camera URIs for this pipeline instance (batch)
    number_sources = len(args)
    Gst.init(None)
    print("Creating Pipeline\n")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write("Unable to create Pipeline\n")
    print("Creating streammux\n")
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write("Unable to create NvStreamMux\n")
    streammux.set_property("width", 1920)
    streammux.set_property("height", 1080)
    streammux.set_property("gpu-id", gpu_id)
    streammux.set_property("batch-size", number_sources)
    streammux.set_property("batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC)
    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin", i, "\n")
        uri_name = args[i]
        if uri_name.find("rtsp://") == 0:
            file_loop = False
        if uri_name.find("file://") == 0:
            file_loop = True
            print("FILE LOOP iS TRUE -----------> ", uri_name)
        source_bin = create_source_bin(i, uri_name,file_loop, gpu_id, list_for_nvurisrcbin)
        if not source_bin:
            sys.stderr.write("Unable to create source bin\n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.request_pad_simple(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin\n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin\n")
        srcpad.link(sinkpad)
    
    
    print("Creating Fakesink \n")
    pipeline_sink = Gst.ElementFactory.make("fakesink", "fakesink")
    pipeline_sink.set_property('enable-last-sample', 0)
    pipeline_sink.set_property('sync', 0)
    pipeline.add(pipeline_sink)
    streammux.link(pipeline_sink)
    
    # Create an event loop and watch for bus messages
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
   
    
    print("Starting pipeline\n")
    pipeline.set_state(Gst.State.PLAYING)
    if gpu_id == 1:
        # Arm a 10 s repeating timer per source bin; start_sr_function
        # returns True, so recording re-triggers every 10 seconds.
        for uribin in list_for_nvurisrcbin:
            timer_id = GLib.timeout_add_seconds(10, start_sr_function, uribin)
    try:
        loop.run()
    except BaseException:
        pass
    pipeline.set_state(Gst.State.NULL)




class Server:
    
    def __init__(self):
        # Adding 20 cameras per process.
        # Note: each camera should have a minimum of 20-30 person objects.
        camids = [
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something",
                    "rtsp://xx.xx.x.xxx:yyyyy/something"
                ]


        # We have 2 L4s in the same host,
        # but the same thing also happens on a single L4.
        processes = []

        for gpu_id in range(0, 2):
            print("____________________ spawning processes for GPU", gpu_id)
            for process_id in range(0, 4):
                list_for_nvurisrcbin = []
                p = multiprocessing.Process(target=pipeline_process, args=(camids, process_id, gpu_id, list_for_nvurisrcbin))
                p.start()
                processes.append(p)
        
        for p in processes:
            p.join()
            
            
Server()

@debjit.adak @snehashish.debnath @Fiona.Chen @junshengy

It may take some time to reproduce the issue. Will be back soon after we reproduce it.

  1. Can you please upgrade to the latest DeepStream 7.1 version?
  2. I’ve tried the attached deepstream_test_3.py, which sets all DeepStream components to run on GPU 1 instead of GPU 0. I don’t see any GPU 0 loading when running with the command
    python3 deepstream_test_3.py -i rtsp://xxxxxx rtsp://xxxx --file-loop --no-display
    deepstream_test_3.py (19.0 KB)

In the code you attached, I did not see any start-sr signal that starts recording. That is why you are not facing the extra-process issue: it starts whenever we start recording. If possible, please use the code we attached above and experiment with the start-sr signal; then you may see the issue we are facing.

Thanks.
@fanzh @junshengy @Fiona.Chen

I’ve tried the C/C++ sample deepstream-test3; with nvurisrcbin smart recording enabled, the app runs entirely on GPU 1. Please try with DeepStream 7.1 GA.

The command is

export NVDS_TEST3_PERF_MODE=1
./deepstream-test3-app rtsp://xxxx

deepstream_test3_app.c (22.0 KB)

I don’t understand why you are using a C++ file when we clearly stated that we are using Python.

If you are doing this, please trigger the start-sr signal from your .py as we have done, and guide us in setting up the pyds bindings for 7.1.

Help us reproduce this issue in Python, preferably on 7.0 if NVIDIA has not stopped supporting it, or please tell us the steps to do the same in Python.

Your experiments are not leading to any conclusions.

The deepstream-test3 Python app is similar to the C version. You can also try to implement the same smart-recording signal emit in the Python sample. The C app has shown us that there is no issue with nvurisrcbin; smart recording is implemented inside nvurisrcbin.
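
For reference, a minimal sketch of such a signal emit in the Python sample, assuming the custom smart-record pyds bindings from this thread (alloc_buffer, get_native_ptr, free_gbuffer) are installed; emit_start_sr and the 30-second interval are illustrative, not part of the stock sample:

import pyds
from gi.repository import GLib

def emit_start_sr(uribin):
    # "start-sr" writes the new session id into a caller-provided buffer.
    session_id_ptr = pyds.get_native_ptr(pyds.alloc_buffer(4))
    # Include 5 s of cached video before the trigger; record for 5 s.
    # User-data pointer omitted here (None); the full script above
    # allocates an SRUserContext buffer instead.
    uribin.emit("start-sr", session_id_ptr, 5, 5, None)
    pyds.free_gbuffer(session_id_ptr)
    return True  # keep the GLib timer armed

# After pipeline.set_state(Gst.State.PLAYING), arm a timer per source bin:
# GLib.timeout_add_seconds(30, emit_start_sr, uri_decode_bin)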


I got it. But can you please also verify with the Python sample app once?

You can implement it yourself. The C code is a sample.

We have tested a modified version of the deepstream-test3 Python sample app with smart recording, which I attached above. Now I am asking you to please experiment with the Python DeepStream sample with smart recording on your side, to conclude whether the duplicate-process issue occurs or not.

I’ve tried the GLib Python binding for the signal emit; it does not work: the signal is not emitted with the Python binding.

Please refer to the C code I provided and generate the Python binding as you like.
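
For context, the plain-PyGObject attempt presumably looks like the single line below; the pointer-typed sessionId and user-data arguments need real native buffers behind them (which is what the custom pyds helpers in the script above allocate), so a naive emit presumably does nothing:

uribin.emit("start-sr", None, 5, 5, None)  # naive emit; reportedly never fires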

Below is the wheel file for DeepStream 7.0 which contains the smart-record bindings. Please use it and let us know whether, when the start-sr signal is given, extra GPU-1 processes get added on GPU-0 or not.

wheel_file_smart_record_bindings.zip (532.8 KB)
The modified version of the deepstream-test3 sample app with smart recording is the same script posted at the top of this thread.


Could someone please replicate Python smart record using the wheel file above?
@fanzh @junshengy

Please provide the source code of your binding.

The .zip file below contains the 4 files we changed to build the smart-record bindings.
smart_record_bindings.zip (11.2 KB)
Below are the paths for all those 4 files.

pyds.cpp -----------------> sources/deepstream-python-apps/bindings/src
utils.cpp ------------------> sources/deepstream-python-apps/bindings/src
bindfunctions.cpp ---------------------> sources/deepstream-python-apps/bindings/src
utils.hpp -------------------> sources/deepstream-python-apps/bindings/include
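
For anyone rebuilding these modified bindings, the usual flow from the deepstream_python_apps bindings README is roughly the following (paths and the generated wheel name vary per setup):

cd sources/deepstream_python_apps/bindings
mkdir build && cd build
cmake ..
make -j$(nproc)
pip3 install ./pyds-*.whl --force-reinstall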

Have you checked our .cpp changes and replicated the issue with our smart-record bindings?

We are using DeepStream 7.0 to run this.
When I compile the C file like this: gcc deepstream_test3_app.c -o alert
I get this error:

deepstream_test3_app.c:13:11: fatal error: gst/gst.h: No such file or directory
   13 |  #include <gst/gst.h>
      |           ^~~~~~~~~~~
compilation terminated.

So I tried to build it this way: gcc -o deepstream_test3_app deepstream_test3_app.c $(pkg-config --cflags --libs gstreamer-1.0 glib-2.0)
and still got errors. So I finally created a Makefile which looks like this:

# Compiler
CC = gcc

# Paths - Update if DeepStream version/path is different
CUDA_PATH = /usr/local/cuda
DEEPSTREAM_PATH = /opt/nvidia/deepstream/deepstream-7.0

# Include paths
CFLAGS = \
  -I/usr/include/gstreamer-1.0 \
  -I/usr/include/glib-2.0 \
  -I/usr/lib/x86_64-linux-gnu/glib-2.0/include \
  -I$(CUDA_PATH)/include \
  -I$(DEEPSTREAM_PATH)/sources/includes

# Library paths
LDFLAGS = \
  -L$(CUDA_PATH)/lib64 \
  -L$(DEEPSTREAM_PATH)/lib

# Libraries
LIBS = \
  -lcudart \
  -lnvds_meta \
  -lnvds_utils \
  -lgstreamer-1.0 \
  -lgobject-2.0 \
  -lglib-2.0 \
  -lm

# Source and target
SRC = deepstream_test3_app.c
TARGET = deepstream_test3_app

# Arguments passed to the program (override on the command line, e.g. make run ARGS="...")
ARGS ?=

# Build target
all: $(TARGET)

$(TARGET): $(SRC)
	$(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) $(LIBS)

# Run the application with input arguments
run: $(TARGET)
	./$(TARGET) $(ARGS)

# Clean
clean:
	rm -f $(TARGET)


and ran it using make run ARGS="rtsp URL".

I get this error:

/usr/bin/ld: /tmp/ccBmGe5Q.o: in function `tiler_src_pad_buffer_probe':
deepstream_test3_app.c:(.text+0x59): undefined reference to `gst_buffer_get_nvds_batch_meta'
/usr/bin/ld: /tmp/ccBmGe5Q.o: in function `bus_call':
deepstream_test3_app.c:(.text+0x2db): undefined reference to `gst_nvmessage_is_stream_eos'
/usr/bin/ld: deepstream_test3_app.c:(.text+0x2f9): undefined reference to `gst_nvmessage_parse_stream_eos'
/usr/bin/ld: /tmp/ccBmGe5Q.o: in function `main':
deepstream_test3_app.c:(.text+0xf0f): undefined reference to `nvds_parse_gie_type'
/usr/bin/ld: deepstream_test3_app.c:(.text+0x1007): undefined reference to `nvds_parse_source_list'
/usr/bin/ld: deepstream_test3_app.c:(.text+0x165a): undefined reference to `nvds_parse_streammux'
/usr/bin/ld: deepstream_test3_app.c:(.text+0x16a3): undefined reference to `nvds_parse_gie'
/usr/bin/ld: deepstream_test3_app.c:(.text+0x178e): undefined reference to `nvds_parse_osd'
/usr/bin/ld: deepstream_test3_app.c:(.text+0x18ed): undefined reference to `nvds_parse_tiler'
/usr/bin/ld: deepstream_test3_app.c:(.text+0x1940): undefined reference to `nvds_parse_fake_sink'
/usr/bin/ld: deepstream_test3_app.c:(.text+0x1997): undefined reference to `nvds_parse_3d_sink'
/usr/bin/ld: deepstream_test3_app.c:(.text+0x19e4): undefined reference to `nvds_parse_egl_sink'
collect2: error: ld returned 1 exit status
make: *** [Makefile:42: deepstream_test3_app] Error 1

Can you please advise how I can fix this and run it?

Please copy the .c file into /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test3 and refer to /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test3/README for how to compile and run it; the rough steps are sketched below.
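
For reference, the README steps are roughly the following (set CUDA_VER to match your install, e.g. 12.2 for DeepStream 7.0); the sample Makefile links the DeepStream libraries (libnvdsgst_meta, libnvds_yml_parser, libnvdsgst_helper) whose absence appears to have caused the undefined references above:

cd /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test3
export CUDA_VER=12.2
make
./deepstream-test3-app rtsp://xxxx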

You mean the .c file you provided?

Yes. The deepstream_test3_app.c file.


It is in the same path, and I followed the README steps. The errors above are what I get after following the README.