Hey Folks,
I am trying to use my Gstreamer-Deepstream Pipeline as follow into a Python Wrapper using Gst-Python my command line Pipeline works well for same input stream of local file, but my python Wrapper seems to be loading model and config file But i see nothing saved on the output.mkv , I am using filesink also i dont see any errors can someone suggest me something. Thanks in advance !!!
My goals are
• Use my gst-python wrapper to work on my input video stream and save the output video
• write the intermediate output of the NvSegVisual to a numpy array or any Cv2 dataform for post analysis
Gstreamer Command line pipeline
sudo gst-launch-1.0 filesrc location = 934.mp4 ! qtdemux ! h264parse ! nvv4l2decoder ! m.sink_0 nvstreammux name=m batch-size=1 width=1920 height=1080 ! nvvideoconvert ! nvdspreprocess config-file= /opt/nvidia/deepstream/deepstream/sources/gst-plugins/gst-nvdspreprocess/config_preprocess.txt ! nvinferbin config-file-path= /opt/nvidia/deepstream/deepstream-6.0/samples/configs/tao_pretrained_models/nv_seg_tao_unet_config.txt ! nvsegvisual ! nvvideoconvert ! nvv4l2h264enc ! h264parse ! 'video/x-h264,stream-format=avc' ! matroskamux ! filesink location=gst_out.mkv
Python Wrapper
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GObject
import sys
#sys.path.append('../')
import gi
import math
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
import cv2
#import pyds
#import numpy as np
#import os.path
from os import path
### Could be Ignored sample Def for writing nvsegvisual output to a file
def seg_src_pad_buffer_probe(pad, info, u_data):
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
while l_frame is not None:
try:
# Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
# The casting is done by pyds.NvDsFrameMeta.cast()
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break
frame_number = frame_meta.frame_num
l_user = frame_meta.frame_user_meta_list
while l_user is not None:
try:
# Note that l_user.data needs a cast to pyds.NvDsUserMeta
# The casting is done by pyds.NvDsUserMeta.cast()
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
seg_user_meta = pyds.NvDsUserMeta.cast(l_user.data)
except StopIteration:
break
if seg_user_meta and seg_user_meta.base_meta.meta_type == \
pyds.NVDSINFER_SEGMENTATION_META:
try:
# Note that seg_user_meta.user_meta_data needs a cast to
# pyds.NvDsInferSegmentationMeta
# The casting is done by pyds.NvDsInferSegmentationMeta.cast()
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
segmeta = pyds.NvDsInferSegmentationMeta.cast(seg_user_meta.user_meta_data)
except StopIteration:
break
# Retrieve mask data in the numpy format from segmeta
# Note that pyds.get_segmentation_masks() expects object of
# type NvDsInferSegmentationMeta
masks = pyds.get_segmentation_masks(segmeta)
masks = np.array(masks, copy=True, order='C')
# map the obtained masks to colors of 2 classes.
frame_image = map_mask_as_display_bgr(masks)
cv2.imwrite(folder_name + "/" + str(frame_number) + ".jpg", frame_image)
try:
l_user = l_user.next
except StopIteration:
break
try:
l_frame = l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
#### Main Pipeline
def main():
GObject.threads_init()
Gst.init(None)
pipeline = Gst.Pipeline()
source = Gst.ElementFactory.make("filesrc", "video-source")
source.set_property("location", "34.mp4")
pipeline.add(source)
print('Source loaded')
print(source)
parse = Gst.ElementFactory.make("h264parse", "parse")
pipeline.add(parse)
print('H264parse loaded')
decoder = Gst.ElementFactory.make("nvv4l2decoder", "decoder")
#decoder.set_property("drop-frame-interval", 0)
pipeline.add(decoder)
parse.link(decoder)
print('NVV4l2Decoder loaded')
mux = Gst.ElementFactory.make("nvstreammux", "mux")
mux.get_request_pad("m.sink_0")
mux.set_property("name", 'm')
mux.set_property("batch-size", 1)
mux.set_property("width", 1920)
mux.set_property("height", 1080)
pipeline.add(mux)
decoder.link(mux)
print('Streammux loaded')
convert = Gst.ElementFactory.make("nvvideoconvert", "convert")
pipeline.add(convert)
mux.link(convert)
print('NvvideoConvert loaded')
nvds = Gst.ElementFactory.make("nvdspreprocess", "preprocess")
nvds.set_property("config-file", "/opt/nvidia/deepstream/deepstream/sources/gst-plugins/gst-nvdspreprocess/config_preprocess.txt")
pipeline.add(nvds)
convert.link(nvds)
print('NVDSPreprocess loaded')
infer = Gst.ElementFactory.make("nvinferbin", "infer")
infer.set_property("config-file-path", "/opt/nvidia/deepstream/deepstream-6.0/samples/configs/tao_pretrained_models/nv_seg_tao_unet_config.txt")
pipeline.add(infer)
nvds.link(infer)
print('INFERBIN loaded')
print(infer)
seg = Gst.ElementFactory.make("nvsegvisual", "seg")
pipeline.add(seg)
infer.link(seg)
print('NvSegVisual loaded')
print(type(seg))
#convert2 = Gst.ElementFactory.make("nvvideoconvert", "convert")
#pipeline.add(convert2)
seg.link(convert)
print('NvvideoConvert loaded')
enc = Gst.ElementFactory.make("nvv4l2h264enc", "enc")
pipeline.add(enc)
convert.link(enc)
print('NVV4L2H264ENC loaded')
enc.link(parse)
print('H264Parse loaded')
caps = Gst.Caps.from_string("video/x-h264,stream-format=avc")
filter = Gst.ElementFactory.make("capsfilter", "filter")
filter.set_property("caps", caps)
pipeline.add(filter)
parse.link(filter)
print('FomatConvert loaded')
#queue = Gst.ElementFactory.make("queue", "queue")
#pipeline.add(queue)
#filter.link(queue)
mkv = Gst.ElementFactory.make("matroskamux", "mkv")
pipeline.add(mkv)
filter.link(mkv)
print('MatroSkamux loaded')
sink = Gst.ElementFactory.make("filesink", "video-sink")
sink.set_property("location", 'sample_out.mkv')
#sink.set_property("window-y", 0)
#sink.set_property("window-width", 1280)
#sink.set_property("window-height", 720)
pipeline.add(sink)
mkv.link(sink)
print('FileSink loaded')
# create an event loop and feed gstreamer bus mesages to it
#loop = GLib.MainLoop()
#bus = pipeline.get_bus()
#bus.add_signal_watch()
#bus.connect("message", bus_call, loop)
'''
# Lets add probe to get informed of the meta data generated, we add probe to
# the src pad of the inference element
seg_src_pad = seg.get_static_pad("src")
if not seg_src_pad:
sys.stderr.write(" Unable to get src pad \n")
else:
seg_src_pad.add_probe(Gst.PadProbeType.BUFFER, seg_src_pad_buffer_probe, 0)
loop = GObject.MainLoop()
'''
loop = GObject.MainLoop()
pipeline.set_state(Gst.State.PLAYING)
print('END-PipeLine ')
'''
ct=0
try:
loop.run()
ct +=1
print(ct)
except:
pass
'''
pipeline.set_state(Gst.State.NULL)
if __name__ == "__main__":
main()
Output Log
Source loaded
<__gi__.GstFileSrc object at 0x7f3ce48bceb0 (GstFileSrc at 0x56077e92a320)>
H264parse loaded
NVV4l2Decoder loaded
Streammux loaded
NvvideoConvert loaded
NVDSPreprocess loaded
INFERBIN loaded
<__gi__.GstDsNvInferBin object at 0x7f3ce48c07d0 (GstDsNvInferBin at 0x56077f21e040)>
NvSegVisual loaded
<class '__gi__.GstNvSegVisual'>
NvvideoConvert loaded
NVV4L2H264ENC loaded
H264Parse loaded
FomatConvert loaded
MatroSkamux loaded
FileSink loaded
0:00:17.168951836 10445 0x56077ffe6a40 INFO nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger:<nvinfer_bin_nvinfer> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1900> [UID = 1]: deserialized trt engine from :/opt/nvidia/deepstream/deepstream-6.0/samples/configs/tao_pretrained_models/CA_CD.etlt_b1_gpu0_fp32.engine
WARNING: [TRT]: The getMaxBatchSize() function should not be used with an engine built from a network created with NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag. This function will always return 1.
INFO: ../nvdsinfer/nvdsinfer_model_builder.cpp:610 [Implicit Engine Info]: layers num: 2
0 INPUT kFLOAT input_1 3x512x512
1 OUTPUT kFLOAT softmax_1 512x512x3
0:00:17.202368243 10445 0x56077ffe6a40 INFO nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger:<nvinfer_bin_nvinfer> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2004> [UID = 1]: Use deserialized engine model: /opt/nvidia/deepstream/deepstream-6.0/samples/configs/tao_pretrained_models/CA_CD.etlt_b1_gpu0_fp32.engine
0:00:17.215687040 10445 0x56077ffe6a40 INFO nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus:<nvinfer_bin_nvinfer> [UID 1]: Load new model:/opt/nvidia/deepstream/deepstream-6.0/samples/configs/tao_pretrained_models/nv_seg_tao_unet_config.txt sucessfully
END-PipeLine
• Hardware Platform
Tesla T4
• DeepStream Version
deepstream-app version 6.0.1
DeepStreamSDK 6.0.1
CUDA Driver Version: 11.4
CUDA Runtime Version: 11.4
TensorRT Version: 8.4
cuDNN Version: 8.4
libNVWarp360 Version: 2.0.1d3
gst-launch-1.0 version 1.20.3
GStreamer 1.20.3