Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU) GPU/3070
• DeepStream Version 6.1.1
• JetPack Version (valid for Jetson only)
• TensorRT Version 8.6
• NVIDIA GPU Driver Version (valid for GPU only) 525
• Issue Type( questions, new requirements, bugs) deepstream,gstreamer
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)
• Requirement details( This is for new requirement. Including the module name-for which plugin or for which sample application, the function description)
Hello:
I’m a deepstream developer. In my project, I use deepstream/gstreamer to read a mp4 video file ,drop some frame and write to mp4 file. But I find a problem that I can only drop frames, and timestamp still in file. Here is a example.
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst,GObject
import pyds
import cv2
import os
result_bitrate = 300000
class ConvertVideo(object):
def __init__(self):
Gst.init(None)
self.result_filename = "draw.mp4"
self.pipeline = Gst.Pipeline()
self.src_name = "file://./origin.mp4"
self.sink_sync = 0
def drop_frame(self,frame_idx):
if (int(frame_idx)>50 and int(frame_idx)<100) or (int(frame_idx)>200 and int(frame_idx)<250) :
return True
else:
return False
def bus_call(self,bus, message, loop):
t = message.type
if t == Gst.MessageType.EOS:
self.pipeline.send_event(Gst.Event.new_eos())
sys.stdout.write("End-of-stream\n")
self.loop.quit()
elif t==Gst.MessageType.WARNING:
err, debug = message.parse_warning()
sys.stderr.write("Warning: %s: %s\n" % (err, debug))
elif t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
sys.stderr.write("Error: %s: %s\n" % (err, debug))
loop.quit()
return True
def make_elm_or_print_err(self,factoryname, name, detail=""):
""" Creates an element with Gst Element Factory make.
Return the element if successfully created, otherwise print
to stderr and return None.
"""
print("Creating", name)
elm = Gst.ElementFactory.make(factoryname, name)
if not elm:
sys.stderr.write("Unable to create " + name + " \n")
if detail:
sys.stderr.write(detail)
return elm
def mulit_add(self,elements):
for i in elements:
self.pipeline.add(i)
def multi_link(self,elements):
# links mulit elements
for i in range(len(elements)-1):
elements[i].link(elements[i+1])
def osd_sink_pad_buffer_probe(self,pad,info,u_data):
frame_number=0
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
while l_frame is not None:
try:
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break
frame_number=frame_meta.frame_num
should_drop = self.drop_frame(frame_number)
if should_drop:
return Gst.PadProbeReturn.DROP
try:
l_frame=l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
def cb_newpad(self,decodebin, decoder_src_pad,data):
print("In cb_newpad\n")
caps=decoder_src_pad.get_current_caps()
if not caps:
caps = decoder_src_pad.query_caps()
gststruct=caps.get_structure(0)
gstname=gststruct.get_name()
source_bin=data
features=caps.get_features(0)
print("gstname=",gstname)
if(gstname.find("video")!=-1):
print("features=",features)
if features.contains("memory:NVMM"):
bin_ghost_pad=source_bin.get_static_pad("src")
if not bin_ghost_pad.set_target(decoder_src_pad):
sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
else:
sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")
def decodebin_child_added(self,child_proxy,Object,name,user_data):
print("Decodebin child added:", name)
if(name.find("decodebin") != -1):
Object.connect("child-added",self.decodebin_child_added,user_data)
if "source" in name:
source_element = child_proxy.get_by_name("source")
if source_element.find_property('drop-on-latency') != None:
Object.set_property("drop-on-latency", True)
def create_source_bin(self,index,uri):
print("Creating source bin")
# Create a source GstBin to abstract this bin's content from the rest of the
# pipeline
bin_name="source-bin-%02d" %index
print(bin_name)
nbin=Gst.Bin.new(bin_name)
if not nbin:
sys.stderr.write(" Unable to create source bin \n")
else:
uri_decode_bin=Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
if not uri_decode_bin:
sys.stderr.write(" Unable to create uri decode bin \n")
# We set the input uri to the source element
uri_decode_bin.set_property("uri",uri)
uri_decode_bin.connect("pad-added",self.cb_newpad,nbin)
uri_decode_bin.connect("child-added",self.decodebin_child_added,nbin)
Gst.Bin.add(nbin,uri_decode_bin)
bin_pad=nbin.add_pad(Gst.GhostPad.new_no_target("src",Gst.PadDirection.SRC))
if not bin_pad:
sys.stderr.write(" Failed to add ghost pad in source bin \n")
return None
return nbin
def create_pipeline(self):
# 创建pipeline
self.pipeline = Gst.Pipeline()
# 创建输入源
source_bin = self.create_source_bin(0, self.src_name)
# 创建视频转换(裁剪)
nvvidconv_cut = self.make_elm_or_print_err("nvvideoconvert", "convertor_cut")
streammux = self.make_elm_or_print_err("nvstreammux", "Stream-muxer")
streammux.set_property('batch-size', 1)
streammux.set_property('batched-push-timeout', 4000000)
video = cv2.VideoCapture(self.src_name.split("file://")[-1])
frame_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
streammux.set_property('width', frame_width)
streammux.set_property('height', frame_height)
self.mulit_add([source_bin,nvvidconv_cut,streammux])
source_bin.link(nvvidconv_cut)
padname = "sink_0"
sinkpad = streammux.get_request_pad(padname)
srcpad = nvvidconv_cut.get_static_pad("src")
srcpad.link(sinkpad)
nvvidconv = self.make_elm_or_print_err("nvvideoconvert", "convertor") # Use convertor to convert from NV12 to RGBA as required by nvosd
nvosd = self.make_elm_or_print_err("nvdsosd", "onscreendisplay") # Create OSD to draw on the converted RGBA buffer
nvvidconv_postosd = self.make_elm_or_print_err("nvvideoconvert", "convertor_postosd")
caps = self.make_elm_or_print_err("capsfilter", "filter")
result_h264enc = self.make_elm_or_print_err("nvv4l2h264enc", "result_h264enc")
result_h264enc.set_property("bitrate", result_bitrate)
result_h264parser = self.make_elm_or_print_err("h264parse", "result_h264-parse")
result_container = self.make_elm_or_print_err("qtmux", "result_container") # mkv matroskamux
result_filesink = self.make_elm_or_print_err("filesink","result_filesink")
result_filesink.set_property("location",self.result_filename)
result_filesink.set_property("sync", self.sink_sync)
self.mulit_add([nvvidconv,nvosd,nvvidconv_postosd,caps,result_h264enc,result_h264parser,result_container,result_filesink])
osdsinkpad = nvosd.get_static_pad("sink")
osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, self.osd_sink_pad_buffer_probe, 0)
self.multi_link([streammux,nvvidconv,nvosd,nvvidconv_postosd,caps,result_h264enc,result_h264parser,result_container,result_filesink])
self.loop = GLib.MainLoop()
bus = self.pipeline.get_bus()
bus.add_signal_watch()
bus.connect ("message", self.bus_call, self.loop)
def start(self):
self.pipeline.set_state(Gst.State.PLAYING)
self.loop.run()
if __name__ == "__main__":
c1 = ConvertVideo()
input_filename = 'input_video.mp4'
c1.src_name = "file://"+os.path.join(os.getcwd(),input_filename)
c1.result_filename = "output_video.mp4"
c1.create_pipeline()
c1.start()
c1.pipeline.set_state(Gst.State.NULL)