Error: gst-stream-error-quark: memory type configured and i/p buffer mismatch ip_surf 2 muxer 3 (1): gstnvstreammux.c(643)

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU) - GeForce RTX 2080 Ti
• DeepStream Version - 6.2
• TensorRT Version - 8.5.2
• NVIDIA GPU Driver Version (valid for GPU only) - 535.54.03
• Issue Type (questions, new requirements, bugs) - bugs

Hi,

I modified deepstream-segmask to save the segmented image (I don't want just the mask, as in the example), and I get:

Error: gst-stream-error-quark: memory type configured and i/p buffer mismatch ip_surf 2 muxer 3 (1): gstnvstreammux.c(643): gst_nvstreammux_chain (): /GstPipeline:pipeline0/GstNvStreamMux:Stream-muxer
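
Note: the numbers in the error are NvBufSurface memory-type enums, and they disagree:

    # ip_surf 2 -> pyds.NVBUF_MEM_CUDA_DEVICE   (the decoder's output buffers)
    # muxer   3 -> pyds.NVBUF_MEM_CUDA_UNIFIED  (what nvstreammux was configured for)

The workaround that emerges later in this thread is to stop forcing unified memory on the muxer.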

This is my code:

#!/usr/bin/env python3

import sys
sys.path.append('../')
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst
import math
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.FPS import PERF_DATA
import numpy as np
import pyds
import cv2
import os
from os import path
import argparse
perf_data = None
MAX_DISPLAY_LEN = 64
MUXER_OUTPUT_WIDTH = 1920
MUXER_OUTPUT_HEIGHT = 1080
MUXER_BATCH_TIMEOUT_USEC = 4000000
TILED_OUTPUT_WIDTH = 1920
TILED_OUTPUT_HEIGHT = 1080
GST_CAPS_FEATURES_NVMM = "memory:NVMM"

def tiler_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        obj_number = 0
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            if is_first_obj and frame_number % 30 == 0:
                is_first_obj = False
                rectparams = obj_meta.rect_params
                maskparams = obj_meta.mask_params
                mask_image = resize_mask(maskparams, math.floor(rectparams.width), math.floor(rectparams.height))

                n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
                frame_image = np.array(n_frame, copy=True, order='C')
                frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGR)
                original_image = cv2.resize(frame_image, (mask_image.shape[1], mask_image.shape[0]))

                # Create a 3-channel, 8-bit mask image (the mask array is float,
                # so cast before the bitwise op to match the BGR frame's dtype)
                mask_image = cv2.merge((mask_image, mask_image, mask_image)).astype(np.uint8)

                # Apply mask to original image
                segmented_image = cv2.bitwise_and(original_image, mask_image)

                img_path = "{}/stream_{}/frame_{}.jpg".format(folder_name, frame_meta.pad_index, frame_number)
                cv2.imwrite(img_path, segmented_image)
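                # Note: pyds.get_nvds_buf_surface maps the buffer for CPU access;
                # on Jetson the mapping must be released afterwards with
                # pyds.unmap_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id),
                # as a later reply in this thread does.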
            
            try:
                l_obj = l_obj.next
                obj_number += 1
            except StopIteration:
                break

        print("Frame Number=", frame_number, "Number of Objects=", num_rects)
        stream_index = "stream{0}".format(frame_meta.pad_index)
        global perf_data
        perf_data.update_fps(stream_index)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK

def clip(val, low, high):
    if val < low:
        return low 
    elif val > high:
        return high 
    else:
        return val

# Resize and binarize mask array for interpretable segmentation mask
def resize_mask(maskparams, target_width, target_height):
    src = maskparams.get_mask_array() # Retrieve mask array
    dst = np.empty((target_height, target_width), src.dtype) # Initialize array to store re-sized mask
    original_width = maskparams.width
    original_height = maskparams.height
    ratio_h = float(original_height) / float(target_height)
    ratio_w = float(original_width) / float(target_width)
    threshold = maskparams.threshold
    channel = 1

    # Resize from original width/height to target width/height 
    for y in range(target_height):
        for x in range(target_width):
            x0 = float(x) * ratio_w
            y0 = float(y) * ratio_h
            left = int(clip(math.floor(x0), 0.0, float(original_width - 1.0)))
            top = int(clip(math.floor(y0), 0.0, float(original_height - 1.0)))
            right = int(clip(math.ceil(x0), 0.0, float(original_width - 1.0)))
            bottom = int(clip(math.ceil(y0), 0.0, float(original_height - 1.0)))

            for c in range(channel):
                # H, W, C ordering
                # Note: lerp is shorthand for linear interpolation
                left_top_val = float(src[top * (original_width * channel) + left * (channel) + c])
                right_top_val = float(src[top * (original_width * channel) + right * (channel) + c])
                left_bottom_val = float(src[bottom * (original_width * channel) + left * (channel) + c])
                right_bottom_val = float(src[bottom * (original_width * channel) + right * (channel) + c])
                top_lerp = left_top_val + (right_top_val - left_top_val) * (x0 - left)
                bottom_lerp = left_bottom_val + (right_bottom_val - left_bottom_val) * (x0 - left)
                lerp = top_lerp + (bottom_lerp - top_lerp) * (y0 - top)
                if (lerp < threshold): # Binarize according to threshold
                    dst[y,x] = 0
                else:
                    dst[y,x] = 255
    return dst
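
# Aside: the per-pixel loop in resize_mask is very slow in Python. A sketch of
# an approximately equivalent mask using cv2.resize, assuming the same
# bilinear-resize-then-binarize semantics (edge handling may differ slightly):
def resize_mask_fast(maskparams, target_width, target_height):
    # get_mask_array() returns a flat array; reshape it to H x W before resizing
    src = maskparams.get_mask_array().reshape(maskparams.height, maskparams.width)
    resized = cv2.resize(src.astype(np.float32), (target_width, target_height),
                         interpolation=cv2.INTER_LINEAR)
    # Binarize against the detector's threshold, as resize_mask does
    return np.where(resized < maskparams.threshold, 0, 255).astype(np.uint8)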
                

def cb_newpad(decodebin, decoder_src_pad, data):
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)

    # Need to check if the pad created by the decodebin is for video and not
    # audio.
    if (gstname.find("video") != -1):
        # Link the decodebin pad only if decodebin has picked nvidia
        # decoder plugin nvdec_*. We do this by checking if the pad caps contain
        # NVMM memory features.
        if features.contains("memory:NVMM"):
            # Get the source bin ghost pad
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")

def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)

    if "source" in name:
        source_element = child_proxy.get_by_name("source")
        if source_element.find_property('drop-on-latency') != None:
            Object.set_property("drop-on-latency", True)

def create_source_bin(index, uri):
    print("Creating source bin")

    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")

    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    # We set the input uri to the source element
    uri_decode_bin.set_property("uri", uri)
    # Connect to the "pad-added" signal of the decodebin which generates a
    # callback once a new pad for raw data has been created by the decodebin
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)

    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin

def main(stream_paths, output_folder):
    global perf_data
    perf_data = PERF_DATA(len(stream_paths))
    number_sources = len(stream_paths)

    global folder_name
    folder_name = output_folder
    if path.exists(folder_name):
        sys.stderr.write("The output folder %s already exists. Please remove it first.\n" % folder_name)
        sys.exit(1)

    os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)
    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streammux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        os.mkdir(folder_name + "/stream_" + str(i))
        print("Creating source_bin ", i, " \n ")
        uri_name = stream_paths[i]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    if is_aarch64():
        print("Creating nv3dsink \n")
        sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink")
        if not sink:
            sys.stderr.write(" Unable to create nv3dsink \n")
    else:
        print("Creating EGLSink \n")
        sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
        if not sink:
            sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest_segmask_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ",
              number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    sink.set_property("sync", 0)
    sink.set_property("qos", 0)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)
        # perf callback function to print fps every 5 sec
        GLib.timeout_add(5000, perf_data.perf_print_callback)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(stream_paths):
        print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)

def parse_args():
    parser = argparse.ArgumentParser(prog="deepstream_segmask.py", 
                description="deepstream-segmask takes multiple URI streams as input" \
                    " and re-sizes and binarizes segmentation mask arrays to save to image")
    parser.add_argument(
        "-i",
        "--input",
        help="Path to input streams",
        nargs="+",
        metavar="URIs",
        default=["a"],
        required=True,
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="output_folder_name",
        default="out",
        help="Name of folder to output mask images",
    )

    args = parser.parse_args()
    stream_paths = args.input
    output_folder = args.output
    return stream_paths, output_folder

if __name__ == '__main__':
    stream_paths, output_folder = parse_args()
    sys.exit(main(stream_paths, output_folder))

Maybe it doesn’t need to be so complicated.

Here is a patch for saving images after segmentation
out.patch (2.4 KB)

I tried it with deepstream-segmask. It works fine.

I got a segmentation fault (core dumped) with this code:

#!/usr/bin/env python3

import sys

sys.path.append('../')
import gi

gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst
import math
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.FPS import PERF_DATA
import numpy as np
import pyds
import cv2
import os
from os import path
import argparse

perf_data = None

MAX_DISPLAY_LEN = 64
MUXER_OUTPUT_WIDTH = 1920
MUXER_OUTPUT_HEIGHT = 1080
MUXER_BATCH_TIMEOUT_USEC = 4000000
TILED_OUTPUT_WIDTH = 1920
TILED_OUTPUT_HEIGHT = 1080
GST_CAPS_FEATURES_NVMM = "memory:NVMM"

# osd_src_pad_buffer_probe will extract metadata received on the OSD src pad
# and save the converted frame to image
def osd_src_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        obj_number = 0
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            if is_first_obj and frame_number % 30 == 0:
                is_first_obj = False
                
                n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
                frame_image = np.array(n_frame, copy=True, order='C')
                frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGR)

                img_path = "{}/stream_{}/frame_{}.jpg".format(folder_name, frame_meta.pad_index, frame_number)

                cv2.imwrite(img_path, frame_image) # Save frame to image
            try:
                l_obj = l_obj.next
                obj_number += 1
            except StopIteration:
                break

        print("Frame Number=", frame_number, "Number of Objects=", num_rects)
        # update frame rate through this probe
        stream_index = "stream{0}".format(frame_meta.pad_index)
        global perf_data
        perf_data.update_fps(stream_index)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK


def clip(val, low, high):
    if val < low:
        return low 
    elif val > high:
        return high 
    else:
        return val

# Resize and binarize mask array for interpretable segmentation mask
def resize_mask(maskparams, target_width, target_height):
    src = maskparams.get_mask_array() # Retrieve mask array
    dst = np.empty((target_height, target_width), src.dtype) # Initialize array to store re-sized mask
    original_width = maskparams.width
    original_height = maskparams.height
    ratio_h = float(original_height) / float(target_height)
    ratio_w = float(original_width) / float(target_width)
    threshold = maskparams.threshold
    channel = 1

    # Resize from original width/height to target width/height 
    for y in range(target_height):
        for x in range(target_width):
            x0 = float(x) * ratio_w
            y0 = float(y) * ratio_h
            left = int(clip(math.floor(x0), 0.0, float(original_width - 1.0)))
            top = int(clip(math.floor(y0), 0.0, float(original_height - 1.0)))
            right = int(clip(math.ceil(x0), 0.0, float(original_width - 1.0)))
            bottom = int(clip(math.ceil(y0), 0.0, float(original_height - 1.0)))

            for c in range(channel):
                # H, W, C ordering
                # Note: lerp is shorthand for linear interpolation
                left_top_val = float(src[top * (original_width * channel) + left * (channel) + c])
                right_top_val = float(src[top * (original_width * channel) + right * (channel) + c])
                left_bottom_val = float(src[bottom * (original_width * channel) + left * (channel) + c])
                right_bottom_val = float(src[bottom * (original_width * channel) + right * (channel) + c])
                top_lerp = left_top_val + (right_top_val - left_top_val) * (x0 - left)
                bottom_lerp = left_bottom_val + (right_bottom_val - left_bottom_val) * (x0 - left)
                lerp = top_lerp + (bottom_lerp - top_lerp) * (y0 - top)
                if (lerp < threshold): # Binarize according to threshold
                    dst[y,x] = 0
                else:
                    dst[y,x] = 255
    return dst
                

def cb_newpad(decodebin, decoder_src_pad, data):
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)

    # Need to check if the pad created by the decodebin is for video and not
    # audio.
    if (gstname.find("video") != -1):
        # Link the decodebin pad only if decodebin has picked nvidia
        # decoder plugin nvdec_*. We do this by checking if the pad caps contain
        # NVMM memory features.
        if features.contains("memory:NVMM"):
            # Get the source bin ghost pad
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")


def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)

    if "source" in name:
        source_element = child_proxy.get_by_name("source")
        if source_element.find_property('drop-on-latency') != None:
            Object.set_property("drop-on-latency", True)

def create_source_bin(index, uri):
    print("Creating source bin")

    # Create a source GstBin to abstract this bin's content from the rest of the
    # pipeline
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")

    # Source element for reading from the uri.
    # We will use decodebin and let it figure out the container format of the
    # stream and the codec and plug the appropriate demux and decode plugins.
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    # We set the input uri to the source element
    uri_decode_bin.set_property("uri", uri)
    # Connect to the "pad-added" signal of the decodebin which generates a
    # callback once a new pad for raw data has been created by the decodebin
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)

    # We need to create a ghost pad for the source bin which will act as a proxy
    # for the video decoder src pad. The ghost pad will not have a target right
    # now. Once the decode bin creates the video decoder and generates the
    # cb_newpad callback, we will set the ghost pad target to the video decoder
    # src pad.
    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin

def main(stream_paths, output_folder):
    global perf_data
    perf_data = PERF_DATA(len(stream_paths))
    number_sources = len(stream_paths)

    global folder_name
    folder_name = output_folder
    if path.exists(folder_name):
        sys.stderr.write("The output folder %s already exists. Please remove it first.\n" % folder_name)
        sys.exit(1)

    os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)
    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streammux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        os.mkdir(folder_name + "/stream_" + str(i))
        print("Creating source_bin ", i, " \n ")
        uri_name = stream_paths[i]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    if is_aarch64():
        print("Creating nv3dsink \n")
        sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink")
        if not sink:
            sys.stderr.write(" Unable to create nv3dsink \n")
    else:
        print("Creating EGLSink \n")
        sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
        if not sink:
            sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest_segmask_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ",
              number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    nvosd.set_property("display_mask", True) # Note: display-mask is supported only for process-mode=0 (CPU)
    nvosd.set_property('process_mode', 0)

    sink.set_property("sync", 0)
    sink.set_property("qos", 0)

    queue1=Gst.ElementFactory.make("queue","queue1")
    queue2=Gst.ElementFactory.make("queue","queue2")
    queue3=Gst.ElementFactory.make("queue","queue3")
    queue4=Gst.ElementFactory.make("queue","queue4")
    queue5=Gst.ElementFactory.make("queue","queue5")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tiler)
    tiler.link(queue3)
    queue3.link(nvvidconv)
    nvvidconv.link(queue4)
    queue4.link(nvosd)
    nvosd.link(queue5)
    queue5.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    osd_src_pad = nvosd.get_static_pad("src")
    
    if not osd_src_pad:
         sys.stderr.write(" Unable to get src pad \n")
    else:
        osd_src_pad.add_probe(Gst.PadProbeType.BUFFER, osd_src_pad_buffer_probe, 0)
        # perf callback function to print fps every 5 sec
        GLib.timeout_add(5000, perf_data.perf_print_callback)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(stream_paths):
        print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)

def parse_args():
    parser = argparse.ArgumentParser(prog="deepstream_segmask.py", 
                description="deepstream-segmask takes multiple URI streams as input" \
                    " and re-sizes and binarizes segmentation mask arrays to save to image")
    parser.add_argument(
        "-i",
        "--input",
        help="Path to input streams",
        nargs="+",
        metavar="URIs",
        default=["a"],
        required=True,
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="output_folder_name",
        default="out",
        help="Name of folder to output mask images",
    )

    args = parser.parse_args()
    stream_paths = args.input
    output_folder = args.output
    return stream_paths, output_folder

if __name__ == '__main__':
    stream_paths, output_folder = parse_args()
    sys.exit(main(stream_paths, output_folder))

Sorry for the mistake.

The previous patch was for Jetson.

Try this patch
out.patch (3.8 KB)

There is a small difference from the previous one.
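
(Presumably the difference is the dGPU memory handling: on dGPU, pyds.get_nvds_buf_surface can only map buffers allocated as CUDA unified memory, so the non-Jetson path needs the nvbuf-memory-type block below. The previous, Jetson-oriented patch omitted it, which would explain the segmentation fault. A sketch, with the element names from the code that follows:)

    if not is_aarch64():
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)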

Now I get Error: gst-stream-error-quark: memory type configured and i/p buffer mismatch ip_surf 2 muxer 3 (1): gstnvstreammux.c(643): gst_nvstreammux_chain (): /GstPipeline:pipeline0/GstNvStreamMux:Stream-muxer

This is my code:

#!/usr/bin/env python3

import sys

sys.path.append('../')
import gi

gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst
import math
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.FPS import PERF_DATA
import numpy as np
import pyds
import cv2
import os
from os import path
import argparse

perf_data = None

MAX_DISPLAY_LEN = 64
MUXER_OUTPUT_WIDTH = 1920
MUXER_OUTPUT_HEIGHT = 1080
MUXER_BATCH_TIMEOUT_USEC = 4000000
TILED_OUTPUT_WIDTH = 1920
TILED_OUTPUT_HEIGHT = 1080
GST_CAPS_FEATURES_NVMM = "memory:NVMM"

# osd_src_pad_buffer_probe will extract metadata received on the OSD src pad
# and save the converted frame to image
def osd_src_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        obj_number = 0
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            if is_first_obj and frame_number % 30 == 0:
                is_first_obj = False
                
                n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
                frame_image = np.array(n_frame, copy=True, order='C')
                frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGR)

                img_path = "{}/stream_{}/frame_{}.jpg".format(folder_name, frame_meta.pad_index, frame_number)

                cv2.imwrite(img_path, frame_image) # Save frame to image

                if is_aarch64(): 
                    pyds.unmap_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
            try:
                l_obj = l_obj.next
                obj_number += 1
            except StopIteration:
                break

        print("Frame Number=", frame_number, "Number of Objects=", num_rects)
        # update frame rate through this probe
        stream_index = "stream{0}".format(frame_meta.pad_index)
        global perf_data
        perf_data.update_fps(stream_index)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK


def clip(val, low, high):
    if val < low:
        return low 
    elif val > high:
        return high 
    else:
        return val

# Resize and binarize mask array for interpretable segmentation mask
def resize_mask(maskparams, target_width, target_height):
    src = maskparams.get_mask_array() # Retrieve mask array
    dst = np.empty((target_height, target_width), src.dtype) # Initialize array to store re-sized mask
    original_width = maskparams.width
    original_height = maskparams.height
    ratio_h = float(original_height) / float(target_height)
    ratio_w = float(original_width) / float(target_width)
    threshold = maskparams.threshold
    channel = 1

    # Resize from original width/height to target width/height 
    for y in range(target_height):
        for x in range(target_width):
            x0 = float(x) * ratio_w
            y0 = float(y) * ratio_h
            left = int(clip(math.floor(x0), 0.0, float(original_width - 1.0)))
            top = int(clip(math.floor(y0), 0.0, float(original_height - 1.0)))
            right = int(clip(math.ceil(x0), 0.0, float(original_width - 1.0)))
            bottom = int(clip(math.ceil(y0), 0.0, float(original_height - 1.0)))

            for c in range(channel):
                # H, W, C ordering
                # Note: lerp is shorthand for linear interpolation
                left_top_val = float(src[top * (original_width * channel) + left * (channel) + c])
                right_top_val = float(src[top * (original_width * channel) + right * (channel) + c])
                left_bottom_val = float(src[bottom * (original_width * channel) + left * (channel) + c])
                right_bottom_val = float(src[bottom * (original_width * channel) + right * (channel) + c])
                top_lerp = left_top_val + (right_top_val - left_top_val) * (x0 - left)
                bottom_lerp = left_bottom_val + (right_bottom_val - left_bottom_val) * (x0 - left)
                lerp = top_lerp + (bottom_lerp - top_lerp) * (y0 - top)
                if (lerp < threshold): # Binarize according to threshold
                    dst[y,x] = 0
                else:
                    dst[y,x] = 255
    return dst
                

def cb_newpad(decodebin, decoder_src_pad, data):
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)

    # Need to check if the pad created by the decodebin is for video and not
    # audio.
    if (gstname.find("video") != -1):
        # Link the decodebin pad only if decodebin has picked nvidia
        # decoder plugin nvdec_*. We do this by checking if the pad caps contain
        # NVMM memory features.
        if features.contains("memory:NVMM"):
            # Get the source bin ghost pad
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")


def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)

    if "source" in name:
        source_element = child_proxy.get_by_name("source")
        if source_element.find_property('drop-on-latency') != None:
            Object.set_property("drop-on-latency", True)

def create_source_bin(index, uri):
    print("Creating source bin")

    # Create a source GstBin to abstract this bin's content from the rest of the
    # pipeline
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")

    # Source element for reading from the uri.
    # We will use decodebin and let it figure out the container format of the
    # stream and the codec and plug the appropriate demux and decode plugins.
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    # We set the input uri to the source element
    uri_decode_bin.set_property("uri", uri)
    # Connect to the "pad-added" signal of the decodebin which generates a
    # callback once a new pad for raw data has been created by the decodebin
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)

    # We need to create a ghost pad for the source bin which will act as a proxy
    # for the video decoder src pad. The ghost pad will not have a target right
    # now. Once the decode bin creates the video decoder and generates the
    # cb_newpad callback, we will set the ghost pad target to the video decoder
    # src pad.
    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin

def main(stream_paths, output_folder):
    global perf_data
    perf_data = PERF_DATA(len(stream_paths))
    number_sources = len(stream_paths)

    global folder_name
    folder_name = output_folder
    if path.exists(folder_name):
        sys.stderr.write("The output folder %s already exists. Please remove it first.\n" % folder_name)
        sys.exit(1)

    os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)
    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streammux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        os.mkdir(folder_name + "/stream_" + str(i))
        print("Creating source_bin ", i, " \n ")
        uri_name = stream_paths[i]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    if is_aarch64():
        print("Creating nv3dsink \n")
        sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink")
        if not sink:
            sys.stderr.write(" Unable to create nv3dsink \n")
    else:
        print("Creating EGLSink \n")
        sink = Gst.ElementFactory.make("fakesink", "nvvideo-renderer")
        if not sink:
            sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest_segmask_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ",
              number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    nvosd.set_property("display_mask", True) # Note: display-mask is supported only for process-mode=0 (CPU)
    nvosd.set_property('process_mode', 0)

    sink.set_property("sync", 0)
    sink.set_property("qos", 0)

    if not is_aarch64():
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    queue1=Gst.ElementFactory.make("queue","queue1")
    queue2=Gst.ElementFactory.make("queue","queue2")
    queue3=Gst.ElementFactory.make("queue","queue3")
    queue4=Gst.ElementFactory.make("queue","queue4")
    queue5=Gst.ElementFactory.make("queue","queue5")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tiler)
    tiler.link(queue3)
    queue3.link(nvvidconv)
    nvvidconv.link(queue4)
    queue4.link(nvosd)
    nvosd.link(queue5)
    queue5.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    osd_src_pad = nvosd.get_static_pad("src")
    
    if not osd_src_pad:
         sys.stderr.write(" Unable to get src pad \n")
    else:
        osd_src_pad.add_probe(Gst.PadProbeType.BUFFER, osd_src_pad_buffer_probe, 0)
        # perf callback function to print fps every 5 sec
        GLib.timeout_add(5000, perf_data.perf_print_callback)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(stream_paths):
        print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)

def parse_args():
    parser = argparse.ArgumentParser(prog="deepstream_segmask.py", 
                description="deepstream-segmask takes multiple URI streams as input" \
                    " and re-sizes and binarizes segmentation mask arrays to save to image")
    parser.add_argument(
        "-i",
        "--input",
        help="Path to input streams",
        nargs="+",
        metavar="URIs",
        default=["a"],
        required=True,
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="output_folder_name",
        default="out",
        help="Name of folder to output mask images",
    )

    args = parser.parse_args()
    stream_paths = args.input
    output_folder = args.output
    return stream_paths, output_folder

if __name__ == '__main__':
    stream_paths, output_folder = parse_args()
    sys.exit(main(stream_paths, output_folder))

Did you modify something? Or some configuration file?

I tried the command below; it works fine.

python3 deepstream_segmask.py -i file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.h264

This is dstest_segmask_config.txt:

[property]
gpu-id=0
net-scale-factor=0.017507
offsets=123.675;116.280;103.53
model-color-format=0

#labelfile-path=/opt/nvidia/deepstream/deepstream/samples/models/tao_pretrained_models/dashcamnet/labels.txt
#tlt-encoded-model=/opt/nvidia/deepstream/deepstream/samples/models/tao_pretrained_models/dashcamnet/resnet18_dashcamnet_pruned.etlt
#tlt-model-key=tlt_encode
#model-engine-file=/opt/nvidia/deepstream/deepstream/samples/models/tao_pretrained_models/dashcamnet/resnet18_dashcamnet_pruned.etlt_b1_gpu0_int8.engine
#int8-calib-file=/opt/nvidia/deepstream/deepstream/samples/models/tao_pretrained_models/dashcamnet/dashcamnet_int8.txt

labelfile-path=/opt/nvidia/deepstream/deepstream/samples/configs/tao_pretrained_models/peopleSegNet_labels.txt
tlt-encoded-model=/opt/nvidia/deepstream/deepstream/samples/models/tao_pretrained_models/peopleSegNet/V2/peoplesegnet_resnet50.etlt
tlt-model-key=nvidia_tlt
model-engine-file=/opt/nvidia/deepstream/deepstream/samples/models/tao_pretrained_models/peopleSegNet/V2/peoplesegnet_resnet50.etlt_b1_gpu0_fp16.engine
int8-calib-file=/opt/nvidia/deepstream/deepstream/samples/models/tao_pretrained_models/peopleSegNet/V2/peoplesegnet_resnet50_int8.txt
infer-dims=3;576;960
uff-input-blob-name=Input
batch-size=1
## 0=FP32, 1=INT8, 2=FP16 mode
network-mode=2
num-detected-classes=2
interval=0
gie-unique-id=1
network-type=3
output-blob-names=generate_detections;mask_fcn_logits/BiasAdd
parse-bbox-instance-mask-func-name=NvDsInferParseCustomMrcnnTLTV2
custom-lib-path=/opt/nvidia/deepstream/deepstream/lib/libnvds_infercustomparser.so
#no cluster
cluster-mode=4
output-instance-mask=1

[class-attrs-all]
pre-cluster-threshold=0.8

My code is in the previous reply. I run it using: python3 deepstream_segmask_polygon_color_mask.py -i rtsp://stream -o output

Comment out line 349 like below.

#streammux.set_property("nvbuf-memory-type", mem_type)

There is a difference between RTSP and local files.
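
(In other words, an RTSP decode path presumably delivers NVBUF_MEM_CUDA_DEVICE buffers to the muxer, so forcing NVBUF_MEM_CUDA_UNIFIED on nvstreammux produces the mismatch, while a local-file path is compatible. A sketch of the resulting block, assuming the variable names from the code above:)

    if not is_aarch64():
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        # streammux.set_property("nvbuf-memory-type", mem_type)  # mismatches the RTSP decoder's buffers
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)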


It works in the terminal, but it saves only frame_0.jpg and I can't see the stream now.
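
(The stream no longer shows because the patch replaces the dGPU display sink with a fakesink; if on-screen output is wanted, it can presumably be swapped back:)

    else:
        print("Creating EGLSink \n")
        sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")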

Please modify the code according to your needs.

out.patch (3.8 KB)

Thanks.

That doesn't resolve my problem; please send the whole file at least.

No one understands your needs better than you. I can only help you with the basic needs.

A patch is the standard way to communicate changes to the community.

Just use git apply out.patch to apply it, and git diff to understand other people's work.

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.