• Hardware Platform (Jetson / GPU)
Xavier NX
• DeepStream Version
6.0
• JetPack Version (valid for Jetson only)
4.6
• TensorRT Version
8.0.1
I am having an issue outputting video files when using a webcam source, where the .mp4 file will be created and contain data but is not playable.
I have been able to use video files as the stream source and run inference on them then output to a .mp4 file that works, so I believe the issue is to do with how I am ending the stream of the camera source, which at the moment is basically just setting the pipeline state to NULL.
Im assuming there is some kind of messaging that can tell the stream to stop gracefully so as not to cause this error but unsure how to accomplish this…
Here is the test script I am currently using:
import sys
sys.path.append('../../')
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
import pyds
def main(args):
# Check input arguments
if len(args) != 2:
sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
sys.exit(1)
# Standard GStreamer initialization
GObject.threads_init()
Gst.init(None)
# Create gstreamer elements
# Create Pipeline element that will form a connection of other elements
print("Creating Pipeline \n ")
pipeline = Gst.Pipeline()
if not pipeline:
sys.stderr.write(" Unable to create Pipeline \n")
source = Gst.ElementFactory.make("v4l2src", "v4l2-source")
if not source:
sys.stderr.write(" Unable to create Source \n")
caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
if not caps_v4l2src:
sys.stderr.write(" Unable to create v4l2src capsfilter \n")
# Since the data format in the input file is elementary mjpg stream,
# we need a jpegparser
print("Creating JPEGParser \n")
jpegparser = Gst.ElementFactory.make("jpegparse", "jpeg-parser")
if not jpegparser:
sys.stderr.write(" Unable to create jpeg parser \n")
# Use nvjpegdec for hardware accelerated decode on GPU
print("Creating Decoder \n")
decoder = Gst.ElementFactory.make("jpegdec", "jpeg-decoder")
if not decoder:
sys.stderr.write(" Unable to create NvJPEG Decoder \n")
# nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
if not nvvidconvsrc:
sys.stderr.write(" Unable to create Nvvideoconvert \n")
caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
if not caps_vidconvsrc:
sys.stderr.write(" Unable to create capsfilter \n")
# Create nvstreammux instance to form batches from one or more sources.
streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
if not streammux:
sys.stderr.write(" Unable to create NvStreamMux \n")
# Use nvinfer to run inferencing on camera's output,t
# behaviour of inferencing is set through config file
pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
if not pgie:
sys.stderr.write(" Unable to create pgie \n")
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
if not nvvidconv:
sys.stderr.write(" Unable to create nvvidconv \n")
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
if not nvosd:
sys.stderr.write(" Unable to create nvosd \n")
nvvidconv2 = Gst.ElementFactory.make("nvvideoconvert", "convertor2")
if not nvvidconv2:
sys.stderr.write(" Unable to create nvvidconv2 \n")
capsfilter = Gst.ElementFactory.make("capsfilter", "capsfilter")
if not capsfilter:
sys.stderr.write(" Unable to create capsfilter \n")
caps = Gst.Caps.from_string("video/x-raw, format=I420")
capsfilter.set_property("caps", caps)
encoder = Gst.ElementFactory.make("avenc_mpeg4", "encoder")
if not encoder:
sys.stderr.write(" Unable to create encoder \n")
encoder.set_property("bitrate", 2000000)
print("Creating Code Parser \n")
codeparser = Gst.ElementFactory.make("mpeg4videoparse", "mpeg4-parser")
if not codeparser:
sys.stderr.write(" Unable to create code parser \n")
print("Creating Container \n")
container = Gst.ElementFactory.make("qtmux", "qtmux")
if not container:
sys.stderr.write(" Unable to create code parser \n")
print("Creating Sink \n")
sink = Gst.ElementFactory.make("filesink", "filesink")
if not sink:
sys.stderr.write(" Unable to create file sink \n")
transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
if not transform:
sys.stderr.write(" Unable to create transform \n")
vsink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
vsink.set_property('sync', False)
sink.set_property("location", "./out.mp4")
sink.set_property("sync", 1)
sink.set_property("async", 0)
print("Playing file %s " %args[1])
caps_v4l2src.set_property('caps', Gst.Caps.from_string("image/jpeg,width=1920,height=1080,framerate=25/1 format=MJPG"))
caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
streammux.set_property('width', 1920)
streammux.set_property('height', 1080)
streammux.set_property('batch-size', 1)
streammux.set_property('batched-push-timeout', 4000000)
pgie.set_property('config-file-path', "../ConfigFiles/inference_0.txt")
sink.set_property("sync", False)
print("Adding elements to Pipeline \n")
pipeline.add(source)
pipeline.add(caps_v4l2src)
pipeline.add(jpegparser)
pipeline.add(decoder)
pipeline.add(nvvidconvsrc)
pipeline.add(caps_vidconvsrc)
pipeline.add(streammux)
pipeline.add(pgie)
pipeline.add(nvvidconv)
pipeline.add(nvosd)
pipeline.add(nvvidconv2)
pipeline.add(encoder)
pipeline.add(capsfilter)
pipeline.add(codeparser)
pipeline.add(container)
pipeline.add(sink)
# we link the elements together
# file-source -> h264-parser -> nvh264-decoder ->
# nvinfer -> nvvidconv -> nvosd -> video-renderer
print("Linking elements in the Pipeline \n")
source.link(caps_v4l2src)
caps_v4l2src.link(jpegparser)
jpegparser.link(decoder)
decoder.link(nvvidconvsrc)
nvvidconvsrc.link(caps_vidconvsrc)
sinkpad = streammux.get_request_pad("sink_0")
if not sinkpad:
sys.stderr.write(" Unable to get the sink pad of streammux \n")
srcpad = caps_vidconvsrc.get_static_pad("src")
if not srcpad:
sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
srcpad.link(sinkpad)
streammux.link(pgie)
pgie.link(nvvidconv)
nvvidconv.link(nvosd)
nvosd.link(nvvidconv2)
nvvidconv2.link(capsfilter)
capsfilter.link(encoder)
encoder.link(codeparser)
sinkpad1 = container.get_request_pad("video_0")
if not sinkpad1:
sys.stderr.write(" Unable to get the sink pad of qtmux \n")
srcpad1 = codeparser.get_static_pad("src")
if not srcpad1:
sys.stderr.write(" Unable to get mpeg4 parse src pad \n")
srcpad1.link(sinkpad1)
container.link(sink)
# create an event loop and feed gstreamer bus mesages to it
loop = GObject.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect ("message", bus_call, loop)
# Lets add probe to get informed of the meta data generated, we add probe to
# the sink pad of the osd element, since by that time, the buffer would have
# had got all the metadata.
# start play back and listen to events
print("Starting pipeline \n")
pipeline.set_state(Gst.State.PLAYING)
try:
loop.run()
except KeyboardInterrupt:
print('STOPPED')
pass
# cleanup
pipeline.set_state(Gst.State.NULL)
if __name__ == '__main__':
sys.exit(main(sys.argv))