DeepStream Python app for MJPG stream

Hardware Platform: Jetson Nano
DeepStream Version: 5.0
JetPack Version: 4.3
TensorRT Version: Not sure

Hi, I’m trying to run a DeepStream inference pipeline on an MJPG input stream. The camera info is as follows:

Driver Info (not using libv4l2):
   	Driver name   : uvcvideo
   	Card type     : USB3.0 HD Video Capture
   	Bus info      : usb-70090000.xusb-2.1
   	Driver version: 4.9.140
   	Capabilities  : 0x84200001
   		Video Capture
   		Streaming
   		Extended Pix Format
   		Device Capabilities
   	Device Caps   : 0x04200001
   		Video Capture
   		Streaming
   		Extended Pix Format
   ioctl: VIDIOC_ENUM_FMT
   	Index       : 0
   	Type        : Video Capture
   	Pixel Format: 'MJPG' (compressed)
   	Name        : Motion-JPEG
   		Size: Discrete 1920x1080
   			Interval: Discrete 0.017s (60.000 fps)
   			Interval: Discrete 0.033s (30.000 fps)
   		Size: Discrete 1280x720
   			Interval: Discrete 0.017s (60.000 fps)
   			Interval: Discrete 0.033s (30.000 fps)

I’m able to get the video streamed using the following command, but I’m not sure how to convert it into Python code:
gst-launch-1.0 v4l2src device=/dev/video0 io-mode=2 ! "image/jpeg,framerate=30/1,width=1920,height=1080" ! jpegparse ! nvjpegdec ! video/x-raw ! nvvidconv ! 'video/x-raw(memory:NVMM)' ! nvoverlaysink

I’ve written Python code similar to the deepstream-test1 app for this, but I’m not able to get the pipeline running. The code is as follows:

    def main(args):
            # Check input arguments
            if len(args) != 2:
                sys.stderr.write("usage: %s <v4l2-device-path>\n" % args[0])
                sys.exit(1)

            # Standard GStreamer initialization
            GObject.threads_init()
            Gst.init(None)

            # Create gstreamer elements
            # Create Pipeline element that will form a connection of other elements
            print("Creating Pipeline \n ")
            pipeline = Gst.Pipeline()

            if not pipeline:
                sys.stderr.write(" Unable to create Pipeline \n")

            # Source element for reading from the file
            print("Creating Source \n ")
            source = Gst.ElementFactory.make("v4l2src", "v4l2-source")
            if not source:
                sys.stderr.write(" Unable to create Source \n")

            caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
            if not caps_v4l2src:
                sys.stderr.write(" Unable to create v4l2src capsfilter \n")

            # Since the data format in the input file is elementary mjpg stream,
            # we need a jpegparser
            print("Creating JPEGParser \n")
            jpegparser = Gst.ElementFactory.make("jpegparse", "jpeg-parser")
            if not jpegparser:
                sys.stderr.write(" Unable to create jpeg parser \n")

            # Use nvjpegdec for hardware accelerated decode on GPU
            print("Creating Decoder \n")
            decoder = Gst.ElementFactory.make("nvjpegdec", "jpeg-decoder")
            if not decoder:
                sys.stderr.write(" Unable to create NvJPEG Decoder \n")

            # Create nvstreammux instance to form batches from one or more sources.
            streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
            if not streammux:
                sys.stderr.write(" Unable to create NvStreamMux \n")

            # Use nvinfer to run inferencing on camera's output,
            # behaviour of inferencing is set through config file
            pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
            if not pgie:
                sys.stderr.write(" Unable to create pgie \n")

            # Use convertor to convert from NV12 to RGBA as required by nvosd
            nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
            if not nvvidconv:
                sys.stderr.write(" Unable to create nvvidconv \n")

            # Create OSD to draw on the converted RGBA buffer
            nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")

            if not nvosd:
                sys.stderr.write(" Unable to create nvosd \n")

            # Finally render the osd output
            if is_aarch64():
                transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")

            print("Creating EGLSink \n")
            sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
            if not sink:
                sys.stderr.write(" Unable to create egl sink \n")

            print("Playing file %s " %args[1])
            source.set_property('device', args[1])
            source.set_property('io-mode', 2)
            caps_v4l2src.set_property('caps', Gst.Caps.from_string("image/x-raw, framerate=30/1, width=1920, height=1080"))
            streammux.set_property('width', 1920)
            streammux.set_property('height', 1080)
            streammux.set_property('batch-size', 1)
            streammux.set_property('batched-push-timeout', 4000000)
            pgie.set_property('config-file-path', "dstest1_pgie_config.txt")

            print("Adding elements to Pipeline \n")
            pipeline.add(source)
            pipeline.add(caps_v4l2src)
            pipeline.add(jpegparser)
            pipeline.add(decoder)
            pipeline.add(streammux)
            pipeline.add(pgie)
            pipeline.add(nvvidconv)
            pipeline.add(nvosd)
            pipeline.add(sink)
            if is_aarch64():
                pipeline.add(transform)

            # we link the elements together
            # v4l-source -> caps -> jpeg-parser -> jpeg-decoder ->
            # nvinfer -> nvvidconv -> nvosd -> video-renderer
            print("Linking elements in the Pipeline \n")
            source.link(caps_v4l2src)
            caps_v4l2src.link(jpegparser)
            jpegparser.link(decoder)

            sinkpad = streammux.get_request_pad("sink_0")
            if not sinkpad:
                sys.stderr.write(" Unable to get the sink pad of streammux \n")
            srcpad = decoder.get_static_pad("src")
            if not srcpad:
                sys.stderr.write(" Unable to get source pad of decoder \n")
            srcpad.link(sinkpad)
            streammux.link(pgie)
            pgie.link(nvvidconv)
            nvvidconv.link(nvosd)
            if is_aarch64():
                nvosd.link(transform)
                transform.link(sink)
            else:
                nvosd.link(sink)

            # create an event loop and feed gstreamer bus mesages to it
            loop = GObject.MainLoop()
            bus = pipeline.get_bus()
            bus.add_signal_watch()
            bus.connect ("message", bus_call, loop)

            # Lets add probe to get informed of the meta data generated, we add probe to
            # the sink pad of the osd element, since by that time, the buffer would have
            # had got all the metadata.
            osdsinkpad = nvosd.get_static_pad("sink")
            if not osdsinkpad:
                sys.stderr.write(" Unable to get sink pad of nvosd \n")

            osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

            # start play back and listen to events
            print("Starting pipeline \n")
            pipeline.set_state(Gst.State.PLAYING)
            try:
                loop.run()
            except:
                pass
            # cleanup
            pipeline.set_state(Gst.State.NULL)

    if __name__ == '__main__':
        sys.exit(main(sys.argv))

I’m getting the following errors on running this:

Creating Pipeline 
 
Creating Source 
 
Creating JPEGParser 

Creating Decoder 

Creating EGLSink 

Playing file /dev/video0 
Warn: 'threshold' parameter has been deprecated. Use 'pre-cluster-threshold' instead.
Adding elements to Pipeline 

Linking elements in the Pipeline 

Starting pipeline 


Using winsys: x11 
0:00:03.932547254 17231     0x3611dc70 INFO                 nvinfer gstnvinfer.cpp:602:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1577> [UID = 1]: deserialized trt engine from :/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp16.engine
INFO: [Implicit Engine Info]: layers num: 3
0   INPUT  kFLOAT input_1         3x368x640       
1   OUTPUT kFLOAT conv2d_bbox     16x23x40        
2   OUTPUT kFLOAT conv2d_cov/Sigmoid 4x23x40         

0:00:03.932726120 17231     0x3611dc70 INFO                 nvinfer gstnvinfer.cpp:602:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:1681> [UID = 1]: Use deserialized engine model: /opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp16.engine
0:00:03.958844176 17231     0x3611dc70 INFO                 nvinfer gstnvinfer_impl.cpp:311:notifyLoadModelStatus:<primary-inference> [UID 1]: Load new model:dstest1_pgie_config.txt sucessfully
Error: gst-stream-error-quark: Internal data stream error. (1): gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:pipeline0/GstV4l2Src:v4l2-source:
streaming stopped, reason not-linked (-1)

Your pipeline is failing to start because some elements are not linked.

streaming stopped, reason not-linked (-1)

When you link, the link function returns a boolean indicating its success.

So you can change:

source.link(caps_v4l2src)

to

if not source.link(caps_v4l2src):
    sys.stderr.write(f"{source.name} did not link to {caps_v4l2src.name}\n")
    return -1

(and so forth for the rest)

Since this is Python, you can write your own link function that raises an error on failure, use that to link, and put your whole linking code in a `try` block like below.

class LinkError(RuntimeError):
    """thrown on failure to link"""

def link_element(a, b):
    if not a.link(b):
        raise LinkError(f"{a.name} could not be linked to {b.name}")

try:
    # these are all elements
    link_element(source, converter)
    link_element(converter, sink)
except LinkError as err:
    print(err)
    # clean up and exit

(or just not use the try block and let the program terminate immediately). Writing these sorts of things can make sense, especially if your program grows in size and you have a lot of linking to do.
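For example, you could build a hypothetical link_many helper on the same idea (not part of any library, just a convenience):

def link_many(*elements):
    """Link a chain of elements in order, raising LinkError on the first failure."""
    for a, b in zip(elements, elements[1:]):
        link_element(a, b)

# usage: one call instead of several link_element() lines
link_many(source, caps_v4l2src, jpegparser, decoder)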

Linking by pad returns a Gst.PadLinkReturn (a GLib enum) you can check as well:

if pad_a.link(pad_b) != Gst.PadLinkReturn.OK:
    # fail

If you want the reason a pad link failed, you can do this:

ret = pad_a.link(pad_b)
if ret != Gst.PadLinkReturn.OK:
    # GLib enums expose `.value_name` in Python to get the readable name.
    sys.stderr.write(f"could not link {pad_a.name} to {pad_b.name} because {ret.value_name}\n")

GStreamer itself will also tell you what failed and why if you set the appropriate environment variable before running your script. Example:

GST_DEBUG=4 python3 your_deepstream_script.py

You’ll get a ton of text in the terminal, so you may wish to redirect it to a file you can read later. The GStreamer documentation describes more debugging utilities. Once you figure out what’s not linking and why, you should be able to fix the issue. If you can’t figure it out, don’t hesitate to keep asking questions.
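If you’d rather not touch the shell, you can also set these variables from inside the script, as long as that happens before Gst.init() runs (a minimal sketch; the log path is just an example):

import os
# Must be set before Gst.init() for GStreamer to pick them up.
os.environ['GST_DEBUG'] = '4'
os.environ['GST_DEBUG_FILE'] = '/tmp/gst_debug.log'  # optional: write the log to a file

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)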

Hi @mdegans, thank you very much for a quick and elaborate response!

This is really valuable info, as it was definitely getting difficult to understand the responses from GStreamer. I will try adding the try/except blocks as you’ve mentioned to debug the code.

Thank you again! I will keep you updated on my results.

I was able to run the pipeline with inference on an MJPG stream with the following pipeline:

gst-launch-1.0 v4l2src device=/dev/video0 io-mode=2 ! 'image/jpeg,width=1920,height=1080,framerate=30/1' ! jpegparse ! jpegdec ! videoconvert ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=NV12' ! mux.sink_0 nvstreammux live-source=1 name=mux batch-size=1 width=1920 height=1080 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream-5.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt batch-size=1 ! nvmultistreamtiler rows=1 columns=1 width=1920 height=1080 ! nvvideoconvert ! nvegltransform ! nveglglessink

It runs pretty fast, but I am still a bit confused as to why nvjpegdec does not work in place of jpegdec. I ran the pipeline with nvjpegdec as follows:

gst-launch-1.0 v4l2src device=/dev/video0 io-mode=2 ! 'image/jpeg,width=1920,height=1080,framerate=30/1' ! jpegparse ! nvjpegdec ! video/x-raw ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=NV12' ! mux.sink_0 nvstreammux live-source=1 name=mux batch-size=1 width=1920 height=1080 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream-5.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt batch-size=1 ! nvmultistreamtiler rows=1 columns=1 width=1920 height=1080 ! nvvideoconvert ! nvegltransform ! fpsdisplaysink video-sink=nveglglessink text-overlay=0 -v

I get the following output, ending with a SIGSEGV:

Using winsys: x11 
ERROR: Deserialize engine failed because file path: /opt/nvidia/deepstream/deepstream-5.0/sources/apps/sample_apps/deepstream-test1/../../../../samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_int8.engine open error
0:00:08.897326966 12562   0x5594efa800 WARN                 nvinfer gstnvinfer.cpp:599:gst_nvinfer_logger:<nvinfer0> NvDsInferContext[UID 1]: Warning from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1566> [UID = 1]: deserialize engine from file :/opt/nvidia/deepstream/deepstream-5.0/sources/apps/sample_apps/deepstream-test1/../../../../samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_int8.engine failed
0:00:08.897409103 12562   0x5594efa800 WARN                 nvinfer gstnvinfer.cpp:599:gst_nvinfer_logger:<nvinfer0> NvDsInferContext[UID 1]: Warning from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:1673> [UID = 1]: deserialize backend context from engine from file :/opt/nvidia/deepstream/deepstream-5.0/sources/apps/sample_apps/deepstream-test1/../../../../samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_int8.engine failed, try rebuild
0:00:08.897442594 12562   0x5594efa800 INFO                 nvinfer gstnvinfer.cpp:602:gst_nvinfer_logger:<nvinfer0> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::buildModel() <nvdsinfer_context_impl.cpp:1591> [UID = 1]: Trying to create engine from model files
WARNING: INT8 not supported by platform. Trying FP16 mode.
INFO: [TRT]: Some tactics do not have sufficient workspace memory to run. Increasing workspace size may increase performance, please check verbose output.
INFO: [TRT]: Detected 1 inputs and 2 output network tensors.
0:00:56.217138829 12562   0x5594efa800 INFO                 nvinfer gstnvinfer.cpp:602:gst_nvinfer_logger:<nvinfer0> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::buildModel() <nvdsinfer_context_impl.cpp:1624> [UID = 1]: serialize cuda engine to file: /opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp16.engine successfully
INFO: [Implicit Engine Info]: layers num: 3
0   INPUT  kFLOAT input_1         3x368x640       
1   OUTPUT kFLOAT conv2d_bbox     16x23x40        
2   OUTPUT kFLOAT conv2d_cov/Sigmoid 4x23x40         

0:00:56.441011121 12562   0x5594efa800 INFO                 nvinfer gstnvinfer_impl.cpp:311:notifyLoadModelStatus:<nvinfer0> [UID 1]: Load new model:/opt/nvidia/deepstream/deepstream-5.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt sucessfully
Pipeline is live and does not need PREROLL ...
Got context from element 'eglglessink0': gst.egl.EGLDisplay=context, display=(GstEGLDisplay)NULL;
/GstPipeline:pipeline0/GstFPSDisplaySink:fpsdisplaysink0/GstEglGlesSink:eglglessink0: sync = true
Setting pipeline to PLAYING ...
New clock: GstSystemClock
/GstPipeline:pipeline0/GstV4l2Src:v4l2src0.GstPad:src: caps = image/jpeg, width=(int)1920, height=(int)1080, framerate=(fraction)30/1, pixel-aspect-ratio=(fraction)1/1, colorimetry=(string)2:4:7:1, interlace-mode=(string)progressive
/GstPipeline:pipeline0/GstCapsFilter:capsfilter0.GstPad:src: caps = image/jpeg, width=(int)1920, height=(int)1080, framerate=(fraction)30/1, pixel-aspect-ratio=(fraction)1/1, colorimetry=(string)2:4:7:1, interlace-mode=(string)progressive
/GstPipeline:pipeline0/GstJpegParse:jpegparse0.GstPad:sink: caps = image/jpeg, width=(int)1920, height=(int)1080, framerate=(fraction)30/1, pixel-aspect-ratio=(fraction)1/1, colorimetry=(string)2:4:7:1, interlace-mode=(string)progressive
/GstPipeline:pipeline0/GstCapsFilter:capsfilter0.GstPad:sink: caps = image/jpeg, width=(int)1920, height=(int)1080, framerate=(fraction)30/1, pixel-aspect-ratio=(fraction)1/1, colorimetry=(string)2:4:7:1, interlace-mode=(string)progressive
/GstPipeline:pipeline0/GstJpegParse:jpegparse0.GstPad:src: caps = image/jpeg, parsed=(boolean)true, format=(string)UYVY, width=(int)1920, height=(int)1080, framerate=(fraction)30/1
/GstPipeline:pipeline0/GstNvJpegDec:nvjpegdec0.GstPad:sink: caps = image/jpeg, parsed=(boolean)true, format=(string)UYVY, width=(int)1920, height=(int)1080, framerate=(fraction)30/1
/GstPipeline:pipeline0/GstNvJpegDec:nvjpegdec0.GstPad:src: caps = video/x-raw, format=(string)I420, width=(int)1920, height=(int)1080, interlace-mode=(string)progressive, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, pixel-aspect-ratio=(fraction)1/1, chroma-site=(string)mpeg2, colorimetry=(string)1:4:0:0, framerate=(fraction)30/1
/GstPipeline:pipeline0/GstCapsFilter:capsfilter1.GstPad:src: caps = video/x-raw, format=(string)I420, width=(int)1920, height=(int)1080, interlace-mode=(string)progressive, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, pixel-aspect-ratio=(fraction)1/1, chroma-site=(string)mpeg2, colorimetry=(string)1:4:0:0, framerate=(fraction)30/1
/GstPipeline:pipeline0/Gstnvvideoconvert:nvvideoconvert0.GstPad:src: caps = video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, interlace-mode=(string)progressive, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction)30/1, format=(string)NV12
/GstPipeline:pipeline0/GstCapsFilter:capsfilter2.GstPad:src: caps = video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, interlace-mode=(string)progressive, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction)30/1, format=(string)NV12
/GstPipeline:pipeline0/GstNvStreamMux:mux.GstPad:src: caps = video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, framerate=(fraction)30/1, format=(string)NV12, batch-size=(int)1, num-surfaces-per-frame=(int)1
/GstPipeline:pipeline0/GstNvInfer:nvinfer0.GstPad:src: caps = video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, framerate=(fraction)30/1, format=(string)NV12, batch-size=(int)1, num-surfaces-per-frame=(int)1
/GstPipeline:pipeline0/GstNvMultiStreamTiler:nvmultistreamtiler0.GstPad:src: caps = video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, framerate=(fraction)30/1, format=(string)NV12, batch-size=(int)1, num-surfaces-per-frame=(int)1
/GstPipeline:pipeline0/Gstnvvideoconvert:nvvideoconvert1.GstPad:src: caps = video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, framerate=(fraction)30/1, format=(string)NV12, batch-size=(int)1, num-surfaces-per-frame=(int)1
/GstPipeline:pipeline0/GstNvEglTransform:nvegltransform0.GstPad:src: caps = video/x-raw(memory:EGLImage), width=(int)1920, height=(int)1080, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, framerate=(fraction)30/1, format=(string)RGBA, batch-size=(int)1, num-surfaces-per-frame=(int)1
/GstPipeline:pipeline0/GstFPSDisplaySink:fpsdisplaysink0.GstGhostPad:sink.GstProxyPad:proxypad0: caps = video/x-raw(memory:EGLImage), width=(int)1920, height=(int)1080, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, framerate=(fraction)30/1, format=(string)RGBA, batch-size=(int)1, num-surfaces-per-frame=(int)1
/GstPipeline:pipeline0/GstFPSDisplaySink:fpsdisplaysink0/GstEglGlesSink:eglglessink0.GstPad:sink: caps = video/x-raw(memory:EGLImage), width=(int)1920, height=(int)1080, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, framerate=(fraction)30/1, format=(string)RGBA, batch-size=(int)1, num-surfaces-per-frame=(int)1
/GstPipeline:pipeline0/GstFPSDisplaySink:fpsdisplaysink0.GstGhostPad:sink: caps = video/x-raw(memory:EGLImage), width=(int)1920, height=(int)1080, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, framerate=(fraction)30/1, format=(string)RGBA, batch-size=(int)1, num-surfaces-per-frame=(int)1
/GstPipeline:pipeline0/GstNvEglTransform:nvegltransform0.GstPad:sink: caps = video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, framerate=(fraction)30/1, format=(string)NV12, batch-size=(int)1, num-surfaces-per-frame=(int)1
/GstPipeline:pipeline0/Gstnvvideoconvert:nvvideoconvert1.GstPad:sink: caps = video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, framerate=(fraction)30/1, format=(string)NV12, batch-size=(int)1, num-surfaces-per-frame=(int)1
/GstPipeline:pipeline0/GstNvMultiStreamTiler:nvmultistreamtiler0.GstPad:sink: caps = video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, framerate=(fraction)30/1, format=(string)NV12, batch-size=(int)1, num-surfaces-per-frame=(int)1
/GstPipeline:pipeline0/GstNvInfer:nvinfer0.GstPad:sink: caps = video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, framerate=(fraction)30/1, format=(string)NV12, batch-size=(int)1, num-surfaces-per-frame=(int)1
/GstPipeline:pipeline0/GstNvStreamMux:mux.GstNvStreamPad:sink_0: caps = video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, interlace-mode=(string)progressive, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction)30/1, format=(string)NV12
/GstPipeline:pipeline0/GstCapsFilter:capsfilter2.GstPad:sink: caps = video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, interlace-mode=(string)progressive, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction)30/1, format=(string)NV12
/GstPipeline:pipeline0/Gstnvvideoconvert:nvvideoconvert0.GstPad:sink: caps = video/x-raw, format=(string)I420, width=(int)1920, height=(int)1080, interlace-mode=(string)progressive, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, pixel-aspect-ratio=(fraction)1/1, chroma-site=(string)mpeg2, colorimetry=(string)1:4:0:0, framerate=(fraction)30/1
/GstPipeline:pipeline0/GstCapsFilter:capsfilter1.GstPad:sink: caps = video/x-raw, format=(string)I420, width=(int)1920, height=(int)1080, interlace-mode=(string)progressive, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, pixel-aspect-ratio=(fraction)1/1, chroma-site=(string)mpeg2, colorimetry=(string)1:4:0:0, framerate=(fraction)30/1
nvbuf_utils: dmabuf_fd -1 mapped entry NOT found
nvbuf_utils: Can not get HW buffer from FD... Exiting...
Caught SIGSEGV

I will try to convert the first pipeline into Python code and keep the thread updated.
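A minimal starting point might look like this, using Gst.parse_launch with the same launch string (an untested sketch; the device and config-file path are copied from the working command above):

import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)

# Same launch string as the working gst-launch command above.
pipeline = Gst.parse_launch(
    'v4l2src device=/dev/video0 io-mode=2 '
    '! image/jpeg,width=1920,height=1080,framerate=30/1 '
    '! jpegparse ! jpegdec ! videoconvert ! nvvideoconvert '
    '! video/x-raw(memory:NVMM),format=NV12 ! mux.sink_0 '
    'nvstreammux live-source=1 name=mux batch-size=1 width=1920 height=1080 '
    '! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream-5.0/sources/apps/'
    'sample_apps/deepstream-test1/dstest1_pgie_config.txt batch-size=1 '
    '! nvmultistreamtiler rows=1 columns=1 width=1920 height=1080 '
    '! nvvideoconvert ! nvegltransform ! nveglglessink')

loop = GLib.MainLoop()

def on_message(bus, message, loop):
    # Stop the loop on error or end-of-stream.
    if message.type == Gst.MessageType.ERROR:
        err, dbg = message.parse_error()
        sys.stderr.write(f"{err}: {dbg}\n")
        loop.quit()
    elif message.type == Gst.MessageType.EOS:
        loop.quit()

bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message', on_message, loop)

pipeline.set_state(Gst.State.PLAYING)
try:
    loop.run()
finally:
    pipeline.set_state(Gst.State.NULL)

From there it should be straightforward to rebuild the same pipeline element by element and attach the OSD pad probe as in deepstream-test1.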