Please provide complete information as applicable to your setup.
• Hardware Platform - GPU
• DeepStream Version - 7.0
• TensorRT Version - 10.6.0
• NVIDIA GPU Driver Version - 560.35.03
• Issue Type - questions
• How to reproduce the issue?
In my DeepStream application, I've implemented custom code that builds a JSON object and sends it to Azure IoT Hub using the azure-iot-hub Python package. The code works fine initially, but after 6-7 hours the FPS log starts printing the PERF {stream:0.0} message, i.e. the reported FPS for the stream drops to 0 and the pipeline stalls. I'm not sure what causes this, but I found that commenting out the code responsible for sending messages to Azure IoT Hub resolves the problem. The issue doesn't seem to be exclusive to the azure-iot-hub package: if I add any other outbound connection, such as sending data to Event Hub or Azure Blob Storage, I run into the same problem. I also checked the system's memory, CPU, and GPU usage, and everything remains stable, so I'm unsure why this message occurs.
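Could the synchronous send inside the buffer probe be blocking the GStreamer streaming thread? For reference, this is the kind of change I'm considering — a minimal sketch (names are illustrative, `send_message_to_iothub` and `iot_hub_connection_string` are from my own utils/config) that hands the payload to a background worker thread so the probe never blocks on network I/O:

import json
import queue
import threading

from utils import send_message_to_iothub  # existing helper from my utils.py

# Hand-off queue between the probe (producer) and the sender (consumer).
iot_send_queue = queue.Queue()

def iot_sender_worker(connection_string):
    # Runs on its own thread, so a slow or hung send never stalls the pipeline.
    while True:
        payload = iot_send_queue.get()
        if payload is None:  # sentinel to stop the worker
            break
        try:
            send_message_to_iothub(connection_string, payload)
        except Exception as e:
            print(f"IoT Hub send failed, dropping message: {e}")

threading.Thread(target=iot_sender_worker,
                 args=(iot_hub_connection_string,),
                 daemon=True).start()

# ...and in the probe, instead of calling send_message_to_iothub() directly:
# iot_send_queue.put(json.dumps(data_dict_1_min[stream_id]))

Would this be the right approach, or is there a DeepStream-native mechanism (e.g. nvmsgconv + nvmsgbroker with the Azure protocol adapter) that I should use instead?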
Code:
import sys
sys.path.append('../')
import gi
import configparser
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst
from ctypes import *
import time
import math
import platform
from common.platform_info import PlatformInfo
import os
import pytz
from shapely.geometry import Point,Polygon
from datetime import datetime
import json
from common.bus_call import bus_call
from common.FPS import PERF_DATA
import argparse
import yaml
from utils import read_yaml,get_current_epoch_in_syd,epoch_to_datetime_str,convert_syd_epoch_to_datetime_str,send_message_to_iothub
import pyds
import multiprocessing
from send_to_blob import monitor_and_upload_videos
perf_data = None
MAX_DISPLAY_LEN=64
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3
MUXER_OUTPUT_WIDTH=1920
MUXER_OUTPUT_HEIGHT=1080
MUXER_BATCH_TIMEOUT_USEC = 33000
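# 33000 µs ≈ one frame interval at ~30 fps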
TILED_OUTPUT_WIDTH=1280
TILED_OUTPUT_HEIGHT=720
GST_CAPS_FEATURES_NVMM="memory:NVMM"
OSD_PROCESS_MODE= 0
OSD_DISPLAY_TEXT= 1
pgie_classes_str= ["Vehicle", "TwoWheeler", "Person","RoadSign"]
syd_tz = pytz.timezone('Australia/Sydney')
yaml_con = read_yaml("main_config.yaml")
cap_fps = int(yaml_con["fps"])
json_write_ther = int(yaml_con["json_write_ther"])
base_json_dir_name = yaml_con["base_json_dir_name"]
base_video_dir_name = yaml_con["base_video_dir_name"]
sec_to_get_data = int(yaml_con["sec_to_get_data"])
iot_hub_connection_string = yaml_con["iot_hub_connection_string"]
no_min_to_write_video = int(yaml_con["no_min_to_write_video"])
def parse_args():
parser = argparse.ArgumentParser(prog="deepstream_demux_multi_in_multi_out.py",
description="deepstream-demux-multi-in-multi-out takes multiple URI streams as input" \
"and uses `nvstreamdemux` to split batches and output separate buffer/streams")
parser.add_argument(
"-i",
"--input",
help="Path to input streams",
nargs="+",
metavar="URIs",
default=["a"],
required=True,
)
args = parser.parse_args()
stream_paths = args.input
return stream_paths
args = parse_args()
print(f"=================================> {args}")
# cap_fps = 8
# base_json_dir_name = "jsons"
# base_video_dir_name = "videos"
# json_write_ther = 60
# sec_to_get_data = 1
# iot_hub_connection_string = "connection-str"
# no_min_to_write_video = 300
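# Per-stream rolling state: per-frame entries accumulate in data_dict, get
# folded into data_dict_1_min every sec_to_get_data seconds, and
# data_dict_1_min is flushed to JSON / IoT Hub every json_write_ther seconds.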
one_min_counter = 60*cap_fps
start_time = {i:get_current_epoch_in_syd() for i in range(len(args))}
one_min_start_time = {i:get_current_epoch_in_syd() for i in range(len(args))}
current_date = datetime.fromtimestamp(get_current_epoch_in_syd(), syd_tz).strftime('%Y-%m-%d')
video_dir_paths = {i:os.path.join(base_video_dir_name,f"{i}",current_date) for i in range(len(args))}
dir_names = {i:os.path.join(base_json_dir_name,f"{i}",current_date) for i in range(len(args))}
[os.makedirs(dir_names[i],exist_ok=True) for i in range(len(args))]
[os.makedirs(video_dir_paths[i],exist_ok=True) for i in range(len(args)) ]
data_dict = { i:{"people_crossing":[],"people_crossing_frame":[],"people_count":[]} for i in range(len(args)) }
camera_ids = {idx:cam_id for idx,cam_id in enumerate(["C1085","C2034","C2035"])}
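# NOTE: camera_ids above is hard-coded for three streams (C1085, C2034, C2035);
# the comprehension below raises KeyError if more inputs are passed.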
data_dict_1_min = { i:{"people_crossing_cumulative":[],"people_crossing_frame":[],"people_count":[],"track_ids":[],"camera_id":camera_ids[i]} for i in range(len(args)) }
object_track_id_conf = {i:{} for i in range(len(args)) }
def make_elm_or_print_err(factoryname, name, printedname, detail=""):
""" Creates an element with Gst Element Factory make.
Return the element if successfully created, otherwise print
to stderr and return None.
"""
print("Creating", printedname)
elm = Gst.ElementFactory.make(factoryname, name)
if not elm:
sys.stderr.write("Unable to create " + printedname + " \n")
if detail:
sys.stderr.write(detail)
return elm
def make_element(element_name, i):
"""
Creates a GStreamer element with a unique name.
The unique name is created by appending the index to the element type, e.g. `element_name-i`.
A unique name is essential for every element in the pipeline, otherwise GStreamer will throw an exception.
:param element_name: The name of the element to create
:param i: the index of the element in the pipeline
:return: A Gst.Element object
"""
element = Gst.ElementFactory.make(element_name, element_name)
if not element:
sys.stderr.write(" Unable to create {0}".format(element_name))
element.set_property("name", "{0}-{1}".format(element_name, str(i)))
return element
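# This probe runs synchronously on the GStreamer streaming thread of the
# nvdsanalytics src pad, so any blocking work done here (file writes, network
# calls) delays buffer flow for the whole pipeline.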
def nvanalytics_src_pad_buffer_probe(pad,info,u_data):
global start_time
# global data_dict_1_min
frame_number=0
num_rects=0
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
while l_frame:
try:
# Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
# The casting is done by pyds.NvDsFrameMeta.cast()
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break
frame_number=frame_meta.frame_num
l_obj=frame_meta.obj_meta_list
num_rects = frame_meta.num_obj_meta
obj_counter = {
PGIE_CLASS_ID_VEHICLE:0,
PGIE_CLASS_ID_PERSON:0,
PGIE_CLASS_ID_BICYCLE:0,
PGIE_CLASS_ID_ROADSIGN:0
}
print("#"*50)
stream_id = frame_meta.pad_index
while l_obj:
try:
# Note that l_obj.data needs a cast to pyds.NvDsObjectMeta
# The casting is done by pyds.NvDsObjectMeta.cast()
obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
track_id = obj_meta.object_id
conf = obj_meta.confidence
if track_id not in object_track_id_conf[stream_id]:
object_track_id_conf[stream_id][track_id] = [conf]
else:
object_track_id_conf[stream_id][track_id].append(conf)
except StopIteration:
break
obj_counter[obj_meta.class_id] += 1
l_user_meta = obj_meta.obj_user_meta_list
# Extract object level meta data from NvDsAnalyticsObjInfo
while l_user_meta:
try:
user_meta = pyds.NvDsUserMeta.cast(l_user_meta.data)
if user_meta.base_meta.meta_type == pyds.nvds_get_user_meta_type("NVIDIA.DSANALYTICSOBJ.USER_META"):
user_meta_data = pyds.NvDsAnalyticsObjInfo.cast(user_meta.user_meta_data)
if user_meta_data.dirStatus: print("Object {0} moving in direction: {1}".format(obj_meta.object_id, user_meta_data.dirStatus))
if user_meta_data.lcStatus: print("Object {0} line crossing status: {1}".format(obj_meta.object_id, user_meta_data.lcStatus))
if user_meta_data.ocStatus: print("Object {0} overcrowding status: {1}".format(obj_meta.object_id, user_meta_data.ocStatus))
if user_meta_data.roiStatus: print("Object {0} roi status: {1}".format(obj_meta.object_id, user_meta_data.roiStatus))
except StopIteration:
break
try:
l_user_meta = l_user_meta.next
except StopIteration:
break
try:
l_obj=l_obj.next
except StopIteration:
break
# Get meta data from NvDsAnalyticsFrameMeta
l_user = frame_meta.frame_user_meta_list
counter = 0
while l_user:
try:
user_meta = pyds.NvDsUserMeta.cast(l_user.data)
if user_meta.base_meta.meta_type == pyds.nvds_get_user_meta_type("NVIDIA.DSANALYTICSFRAME.USER_META"):
user_meta_data = pyds.NvDsAnalyticsFrameMeta.cast(user_meta.user_meta_data)
if user_meta_data.objInROIcnt:
print("Objs in ROI: {0}".format(user_meta_data.objInROIcnt))
if user_meta_data.objLCCumCnt: print("Linecrossing Cumulative: {0}".format(user_meta_data.objLCCumCnt))
if user_meta_data.objLCCurrCnt: print("Linecrossing Current Frame: {0}".format(user_meta_data.objLCCurrCnt))
if user_meta_data.ocStatus: print("Overcrowding status: {0}".format(user_meta_data.ocStatus))
except StopIteration:
break
try:
l_user = l_user.next
except StopIteration:
break
# track_ids_json_path = os.path.join(f"track_ids_{stream_id}",f"track_ids_{stream_id}_{frame_number}.json")
# with open(track_ids_json_path, 'w') as f:
# json.dump(object_track_id_conf, f,indent=4)
stream_index = "stream{0}".format(stream_id)
data_dict[stream_id]["people_count"].append({"data":user_meta_data.objInROIcnt['RF']
})
if user_meta_data.objLCCumCnt:
data_dict[stream_id]["people_crossing"].append({"data":user_meta_data.objLCCumCnt})
current_epoch = get_current_epoch_in_syd()
current_time_str = convert_syd_epoch_to_datetime_str(current_epoch)
try:
if (get_current_epoch_in_syd() - start_time[stream_id]) >= sec_to_get_data:
people_count = [ d["data"] for d in data_dict[stream_id]["people_count"] ]
track_ids = [{track_id : sum(confs)/len(confs)} for track_id,confs in object_track_id_conf[stream_id].items()]
data_dict_1_min[stream_id]["track_ids"].append({"track_ids":track_ids,"current_time_str":current_time_str})
people_count_avg = sum(people_count)/len(people_count)
people_count_max = max(people_count)
people_count_min = min(people_count)
data_dict_1_min[stream_id]["people_count"].append({"data":{"avg":people_count_avg,"max":people_count_max,
"min":people_count_min},"current_time_str":current_time_str,"frame_no":frame_number})
if user_meta_data.objLCCumCnt:
people_crossing_entry = [ d["data"]["Entry"] for d in data_dict[stream_id]["people_crossing"] ]
people_crossing_exit = [ d["data"]["Exit"] for d in data_dict[stream_id]["people_crossing"] ]
# people_count = sum(people_count)/len(people_count)
people_crossing_entry_max = max(people_crossing_entry)
people_crossing_entry_min = min(people_crossing_entry)
people_crossing_entry_avg = sum(people_crossing_entry)/len(people_crossing_entry)
people_crossing_exit_max = max(people_crossing_exit)
people_crossing_exit_min = min(people_crossing_exit)
people_crossing_exit_avg = sum(people_crossing_exit)/len(people_crossing_exit)
data_dict_1_min[stream_id]["people_crossing_cumulative"].append({"data":{"entry":{"avg":people_crossing_entry_avg,"max":people_crossing_entry_max,
"min":people_crossing_entry_min},"exit":{"avg":people_crossing_exit_avg,"max":people_crossing_exit_max,
"min":people_crossing_exit_min}},"current_time_str":current_time_str,"frame_no":frame_number})
# data_dict_1_min[stream_id]["people_count"].append({"data":people_count,"current_time_str":current_time_str})
data_dict_1_min[stream_id]["people_crossing_frame"].append(
{"data":user_meta_data.objLCCurrCnt,"current_time_str":current_time_str,"frame_no":frame_number}
)
# else:
# people_count = sum(people_count)/len(people_count)
# data_dict_1_min[stream_id]["people_count"].append({"data":people_count,"current_time_str":current_time_str})
data_dict[stream_id]["people_count"] = []
data_dict[stream_id]["people_crossing"] = []
data_dict[stream_id]["people_crossing_frame"] = []
object_track_id_conf[stream_id] = {}
start_time[stream_id] = get_current_epoch_in_syd()
data_dict_1_min[stream_id]["frame_no"] = frame_number
if (get_current_epoch_in_syd() - one_min_start_time[stream_id]) >= json_write_ther:
file_path = os.path.join(dir_names[stream_id],f"{current_time_str}.json")
with open(file_path,"w") as f:
json.dump(data_dict_1_min[stream_id],f,indent=4)
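# NOTE: this send is a synchronous network call made from inside the pad
# probe; the streaming thread blocks until it returns.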
s = time.time()
send_message_to_iothub(iot_hub_connection_string, json.dumps(data_dict_1_min[stream_id]))
print(f"\n =================================== total time taken to send the msg to IoT Hub {(time.time() - s):.2f}s =================================== \n")
data_dict_1_min[stream_id]["people_count"] = []
data_dict_1_min[stream_id]["people_crossing"] = []
data_dict_1_min[stream_id]["people_crossing_frame"] = []
data_dict_1_min[stream_id]["people_crossing_cumulative"] = []
data_dict_1_min[stream_id]["track_ids"] = []
one_min_start_time[stream_id] = get_current_epoch_in_syd()
print(f"data saved successfully to file {file_path}")
except Exception as e:
raise e
print("Frame Number=", frame_number, "stream id=", frame_meta.pad_index, "Number of Objects=",num_rects,"Vehicle_count=",obj_counter[PGIE_CLASS_ID_VEHICLE],"Person_count=",obj_counter[PGIE_CLASS_ID_PERSON])
# Update frame rate through this probe
stream_index = "stream{0}".format(frame_meta.pad_index)
global perf_data
perf_data.update_fps(stream_index)
try:
l_frame=l_frame.next
except StopIteration:
break
print("#"*50)
return Gst.PadProbeReturn.OK
def cb_newpad(decodebin, decoder_src_pad,data):
print("In cb_newpad\n")
caps=decoder_src_pad.get_current_caps()
gststruct=caps.get_structure(0)
gstname=gststruct.get_name()
source_bin=data
features=caps.get_features(0)
# Need to check if the pad created by the decodebin is for video and not
# audio.
print("gstname=",gstname)
if(gstname.find("video")!=-1):
# Link the decodebin pad only if decodebin has picked nvidia
# decoder plugin nvdec_*. We do this by checking if the pad caps contain
# NVMM memory features.
print("features=",features)
if features.contains("memory:NVMM"):
# Get the source bin ghost pad
bin_ghost_pad=source_bin.get_static_pad("src")
if not bin_ghost_pad.set_target(decoder_src_pad):
sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
else:
sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")
def decodebin_child_added(child_proxy,Object,name,user_data):
print("Decodebin child added:", name, "\n")
if(name.find("decodebin") != -1):
Object.connect("child-added",decodebin_child_added,user_data)
def create_source_bin(index,uri):
print("Creating source bin")
# Create a source GstBin to abstract this bin's content from the rest of the
# pipeline
bin_name="source-bin-%02d" %index
print(bin_name)
nbin=Gst.Bin.new(bin_name)
if not nbin:
sys.stderr.write(" Unable to create source bin \n")
# Source element for reading from the uri.
# We will use decodebin and let it figure out the container format of the
# stream and the codec and plug the appropriate demux and decode plugins.
uri_decode_bin=Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
if not uri_decode_bin:
sys.stderr.write(" Unable to create uri decode bin \n")
# We set the input uri to the source element
uri_decode_bin.set_property("uri",uri)
# Connect to the "pad-added" signal of the decodebin which generates a
# callback once a new pad for raw data has been created by the decodebin
uri_decode_bin.connect("pad-added",cb_newpad,nbin)
uri_decode_bin.connect("child-added",decodebin_child_added,nbin)
# We need to create a ghost pad for the source bin which will act as a proxy
# for the video decoder src pad. The ghost pad will not have a target right
# now. Once the decode bin creates the video decoder and generates the
# cb_newpad callback, we will set the ghost pad target to the video decoder
# src pad.
Gst.Bin.add(nbin,uri_decode_bin)
bin_pad=nbin.add_pad(Gst.GhostPad.new_no_target("src",Gst.PadDirection.SRC))
if not bin_pad:
sys.stderr.write(" Failed to add ghost pad in source bin \n")
return None
return nbin
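# "format-location" handler for splitmuxsink: it must return the full output
# path for each new fragment. video_dir_path and stream_id arrive as extra
# user-data arguments appended by connect() further below.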
def format_location_callback(splitmuxsink, fragment_id, video_dir_path, stream_id):
# Get the current date and time
current_datetime = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Construct the filename with the current datetime
return os.path.join(video_dir_path, f"stream_{stream_id}_rtsp_out_{current_datetime}.mp4")
def main(args):
input_sources = args
number_sources = len(input_sources)
global perf_data
perf_data = PERF_DATA(number_sources)
platform_info = PlatformInfo()
# Standard GStreamer initialization
Gst.init(None)
print("Creating Pipeline \n ")
pipeline = Gst.Pipeline()
is_live = False
if not pipeline:
sys.stderr.write(" Unable to create Pipeline \n")
print("Creating streamux \n ")
streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
if not streammux:
sys.stderr.write(" Unable to create NvStreamMux \n")
pipeline.add(streammux)
for i in range(number_sources):
print("Creating source_bin ",i," \n ")
uri_name=args[i]
if uri_name.find("rtsp://") == 0 :
is_live = True
source_bin=create_source_bin(i, uri_name)
if not source_bin:
sys.stderr.write("Unable to create source bin \n")
pipeline.add(source_bin)
padname="sink_%u" %i
sinkpad= streammux.request_pad_simple(padname)
if not sinkpad:
sys.stderr.write("Unable to create sink pad bin \n")
srcpad=source_bin.get_static_pad("src")
if not srcpad:
sys.stderr.write("Unable to create src pad bin \n")
srcpad.link(sinkpad)
streammux.set_property('width', 1920)
streammux.set_property('height', 1080)
streammux.set_property('batch-size', number_sources)
streammux.set_property('batched-push-timeout', MUXER_BATCH_TIMEOUT_USEC)
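# NOTE: is_live is detected above but never applied; other DeepStream samples
# (e.g. deepstream_test_3.py) set streammux's 'live-source' property for RTSP inputs.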
queue1=Gst.ElementFactory.make("queue","queue1")
queue2=Gst.ElementFactory.make("queue","queue2")
queue3=Gst.ElementFactory.make("queue","queue3")
queue4=Gst.ElementFactory.make("queue","queue4")
queue5=Gst.ElementFactory.make("queue","queue5")
queue6=Gst.ElementFactory.make("queue","queue6")
queue7=Gst.ElementFactory.make("queue","queue7")
pipeline.add(queue1)
pipeline.add(queue2)
pipeline.add(queue3)
pipeline.add(queue4)
pipeline.add(queue5)
pipeline.add(queue6)
pipeline.add(queue7)
print("Creating Pgie")
pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
if not pgie:
sys.stderr.write(" Unable to create pgie \n")
pgie.set_property('config-file-path', "dsnvanalytics_pgie_config.txt")
pgie_batch_size=pgie.get_property("batch-size")
if(pgie_batch_size != number_sources):
print("WARNING: Overriding infer-config batch-size",pgie_batch_size," with number of sources ", number_sources," \n")
pgie.set_property("batch-size",number_sources)
print("Creating nvtracker")
tracker = Gst.ElementFactory.make("nvtracker", "tracker")
if not tracker:
sys.stderr.write(" Unable to create tracker \n")
#Set properties of tracker
config = configparser.ConfigParser()
config.read('dsnvanalytics_tracker_config.txt')
config.sections()
for key in config['tracker']:
if key == 'tracker-width' :
tracker_width = config.getint('tracker', key)
tracker.set_property('tracker-width', tracker_width)
if key == 'tracker-height' :
tracker_height = config.getint('tracker', key)
tracker.set_property('tracker-height', tracker_height)
if key == 'gpu-id' :
tracker_gpu_id = config.getint('tracker', key)
tracker.set_property('gpu_id', tracker_gpu_id)
if key == 'll-lib-file' :
tracker_ll_lib_file = config.get('tracker', key)
tracker.set_property('ll-lib-file', tracker_ll_lib_file)
if key == 'll-config-file' :
tracker_ll_config_file = config.get('tracker', key)
tracker.set_property('ll-config-file', tracker_ll_config_file)
print("Creating nvdsanalytics")
nvanalytics = Gst.ElementFactory.make("nvdsanalytics", "analytics")
if not nvanalytics:
sys.stderr.write(" Unable to create nvanalytics \n")
nvanalytics.set_property("config-file", "config_nvdsanalytics.txt")
print("Creating nvvidconv")
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
if not nvvidconv:
sys.stderr.write(" Unable to create nvvidconv \n")
print("Creating nvosd ")
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
if not nvosd:
sys.stderr.write(" Unable to create nvosd \n")
nvosd.set_property('process-mode',OSD_PROCESS_MODE)
nvosd.set_property('display-text',OSD_DISPLAY_TEXT)
print("Creating nvstreamdemux")
nvstreamdemux = Gst.ElementFactory.make("nvstreamdemux", "nvstreamdemux")
if not nvstreamdemux:
sys.stderr.write(" Unable to create nvstreamdemux \n")
pipeline.add(pgie)
pipeline.add(tracker)
pipeline.add(nvanalytics)
pipeline.add(nvstreamdemux)
streammux.link(queue1)
queue1.link(pgie)
pgie.link(tracker)
tracker.link(nvanalytics)
nvanalytics.link(nvstreamdemux)
for i in range(number_sources):
queue = make_element("queue", i)
pipeline.add(queue)
nvvideoconvert = make_element("nvvideoconvert", i)
pipeline.add(nvvideoconvert)
nvdsosd = make_element("nvdsosd", i)
pipeline.add(nvdsosd)
nvdsosd.set_property("process-mode", OSD_PROCESS_MODE)
nvdsosd.set_property("display-text", OSD_DISPLAY_TEXT)
# connect nvstreamdemux -> queue
padname = "src_%u" % i
demuxsrcpad = nvstreamdemux.request_pad_simple(padname)
if not demuxsrcpad:
sys.stderr.write("Unable to create demux src pad \n")
queuesinkpad = queue.get_static_pad("sink")
if not queuesinkpad:
sys.stderr.write("Unable to create queue sink pad \n")
demuxsrcpad.link(queuesinkpad)
#save video
queue10 = make_element("queue", f"queue10_{i}")
nvvidconv2 = make_element("nvvideoconvert", f"nvvideoconvert_{i}")
capsfilter = make_element("capsfilter", f"capsfilter_{i}")
caps = Gst.Caps.from_string("video/x-raw, format=I420")
capsfilter.set_property("caps", caps)
encoder = make_element("avenc_mpeg4", f"encode_{i}")
encoder.set_property("bitrate", 2000000)
codeparser = make_element("mpeg4videoparse", f"mpeg4-parser_{i}")
# splitmuxsink to save the stream to disk, splitting files by duration
splitmuxsink = Gst.ElementFactory.make("splitmuxsink", f"splitmuxsink_{i}")
if not splitmuxsink:
sys.stderr.write("Unable to create splitmuxsink\n")
current_date = datetime.now().strftime("%Y-%m-%d")
# fn = os.path.join(video_dir_path,"stream_" + str(i) + "_rtsp_out_%03d.mp4")
# fn = os.path.join(video_dir_path, f"stream_{i}_rtsp_out_{current_date}.mp4")
# splitmuxsink.set_property("location",
# fn)
splitmuxsink.connect("format-location", format_location_callback, video_dir_paths[i], i)
splitmuxsink.set_property("max-size-time", (no_min_to_write_video*60) * Gst.SECOND) # 1-minute file duration
muxer = Gst.ElementFactory.make("qtmux", "mp4mux")
splitmuxsink.set_property("muxer", muxer) # Assign the muxer to splitmuxsink
pipeline.add(queue10)
pipeline.add(nvvidconv2)
pipeline.add(capsfilter)
pipeline.add(encoder)
pipeline.add(codeparser)
pipeline.add(splitmuxsink)
queue.link(nvvideoconvert)
nvvideoconvert.link(nvdsosd)
nvdsosd.link(queue10)
queue10.link(nvvidconv2)
nvvidconv2.link(capsfilter)
capsfilter.link(encoder)
encoder.link(codeparser)
codeparser.link(splitmuxsink)
# create an event loop and feed gstreamer bus messages to it
loop = GLib.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect ("message", bus_call, loop)
nvanalytics_src_pad=nvanalytics.get_static_pad("src")
if not nvanalytics_src_pad:
sys.stderr.write(" Unable to get src pad \n")
else:
nvanalytics_src_pad.add_probe(Gst.PadProbeType.BUFFER,
nvanalytics_src_pad_buffer_probe, 0)
# perf callback function to print fps every 5 sec
GLib.timeout_add(5000, perf_data.perf_print_callback)
# List the sources
print("Now playing...")
for i, source in enumerate(args):
    print(i, ": ", source)
print("Starting pipeline \n")
# start playback and listen to events
pipeline.set_state(Gst.State.PLAYING)
try:
loop.run()
except:
pass
# cleanup
print("Exiting app\n")
pipeline.set_state(Gst.State.NULL)
if __name__ == "__main__":
stream_paths = parse_args()
connection_string = "conection_str"
container_name = "videoanalytics"
blob_video_path = "deepstream-immigration"
chunk_mb_size = 10
deep_stream_process = multiprocessing.Process(target=main,args=(stream_paths,))
send_video_process = multiprocessing.Process(target=monitor_and_upload_videos,args=("videos_test_29/*/*/*.mp4", connection_string, container_name, chunk_mb_size,blob_video_path))
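# NOTE: the glob above ("videos_test_29/*/*/*.mp4") should match
# base_video_dir_name from main_config.yaml, otherwise the uploader never
# sees the files that splitmuxsink writes.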
deep_stream_process.start()
send_video_process.start()
deep_stream_process.join()
send_video_process.join()
# sys.exit()