Cannot send frame image from RTSP stream over MQTT

• Hardware Platform (Jetson / GPU)
Jetson Orin NX
• DeepStream Version
DeepStream 7.0
• JetPack Version (valid for Jetson only)
JetPack 6.0
• TensorRT Version
TensorRT 8.6.2.3
• NVIDIA GPU Driver Version (valid for GPU only)
NVIDIA UNIX Open Kernel Module for aarch64 540.3.0
• Issue Type( questions, new requirements, bugs)
I’m trying to send an image and a message from an RTSP source over MQTT and also output an RTSP stream, but it doesn’t work. When I try to watch the output stream, the stream crashes and I don’t receive the message, although the app still seems to be running. Here’s the code I have a problem with:

def pgie_src_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        print("Frame Number=", frame_number)
        if ts_from_rtsp:
            ts = frame_meta.ntp_timestamp / 1000000000
            print("RTSP Timestamp:", datetime.datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S'))

        obj_meta_list = frame_meta.obj_meta_list
        while obj_meta_list is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(obj_meta_list.data)
            except StopIteration:
                break
            
            ####################################################################

            n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
            n_frame = draw_bounding_boxes(n_frame, obj_meta, obj_meta.confidence)

            # Convert to numpy array and encode
            frame_copy = np.array(n_frame, copy=True, order='C')
            frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGRA)
            _, img_encoded = cv2.imencode('.jpg', frame_copy)
            base64_encoded_image = base64.b64encode(img_encoded).decode('utf-8')

            if platform_info.is_integrated_gpu():
               pyds.unmap_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
            
            ###################################################################

            obj_data = {
                "frame_number": frame_number,
                "class_id": obj_meta.class_id,
                "object_id": obj_meta.object_id,
                "confidence": obj_meta.confidence,
                "bbox": {
                    "left": obj_meta.rect_params.left,
                    "top": obj_meta.rect_params.top,
                    "width": obj_meta.rect_params.width,
                    "height": obj_meta.rect_params.height
                },
                "image": base64_encoded_image
            }
            publish_mqtt_message(obj_data)

            try:
                obj_meta_list = obj_meta_list.next
            except StopIteration:
                break

        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK

When I delete the code between the # markers and the “image” field in the data, the app sends the message over MQTT and streams the RTSP output properly. I guess there’s something wrong with that code, but I don’t know what it is.

Full code

import sys
import base64
import cv2
import numpy as np
sys.path.append("../")
from common.bus_call import bus_call
from common.platform_info import PlatformInfo
import pyds
import platform
import math
import time
from ctypes import *
import gi
gi.require_version("Gst", "1.0")
gi.require_version("GstRtspServer", "1.0")
from gi.repository import Gst, GstRtspServer, GLib
import configparser
import datetime
import paho.mqtt.client as mqtt
import json
import argparse

MAX_DISPLAY_LEN = 64
PGIE_CLASS_ID_PWH = 0
PGIE_CLASS_ID_PWV = 1
PGIE_CLASS_ID_PNH = 2
PGIE_CLASS_ID_PNV = 3
MUXER_OUTPUT_WIDTH = 1920
MUXER_OUTPUT_HEIGHT = 1080
MUXER_BATCH_TIMEOUT_USEC = 33000
TILED_OUTPUT_WIDTH = 1280
TILED_OUTPUT_HEIGHT = 720
GST_CAPS_FEATURES_NVMM = "memory:NVMM"
OSD_PROCESS_MODE = 0
OSD_DISPLAY_TEXT = 0
PGIE_CONFIG_FILE = "config_infer_primary.txt"
MSCONV_CONFIG_FILE = "msgconv_config.txt"
schema_type = 0
proto_lib = "/opt/nvidia/deepstream/deepstream-7.0/lib/libnvds_mqtt_proto.so"
conn_str = "localhost;1883;deepstream/notify"
pgie_classes_str = ["pwh", "pwv", "pnh", "pnv"]

# MQTT configurations
MQTT_BROKER = "localhost"
MQTT_PORT = 1883
MQTT_TOPIC = "deepstream/notify"
    
'''    
def extract_frame_as_base64(gst_buffer, frame_meta):
    # Convert GstBuffer to numpy array
    n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
    frame_image = np.array(n_frame, copy=True, order='C')
    frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA)

    # Encode image to base64
    _, buffer = cv2.imencode('.jpg', frame_image)
    frame_base64 = base64.b64encode(buffer).decode('utf-8')
    
    return frame_base64
'''

def pgie_src_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        print("Frame Number=", frame_number)
        if ts_from_rtsp:
            ts = frame_meta.ntp_timestamp / 1000000000
            print("RTSP Timestamp:", datetime.datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S'))
        
        #frame_base64 = extract_frame_as_base64(gst_buffer, frame_meta)

        obj_meta_list = frame_meta.obj_meta_list
        while obj_meta_list is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(obj_meta_list.data)
            except StopIteration:
                break
            
            #########################################################################

            n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
            n_frame = draw_bounding_boxes(n_frame, obj_meta, obj_meta.confidence)

            # Convert to numpy array and encode
            frame_copy = np.array(n_frame, copy=True, order='C')
            frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGRA)
            _, img_encoded = cv2.imencode('.jpg', frame_copy)
            base64_encoded_image = base64.b64encode(img_encoded).decode('utf-8')

            if platform_info.is_integrated_gpu():
               pyds.unmap_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
            
            ###########################################################################

            obj_data = {
                "frame_number": frame_number,
                "class_id": obj_meta.class_id,
                "object_id": obj_meta.object_id,
                "confidence": obj_meta.confidence,
                "bbox": {
                    "left": obj_meta.rect_params.left,
                    "top": obj_meta.rect_params.top,
                    "width": obj_meta.rect_params.width,
                    "height": obj_meta.rect_params.height
                },
                "image": base64_encoded_image
            }
            publish_mqtt_message(obj_data)

            try:
                obj_meta_list = obj_meta_list.next
            except StopIteration:
                break

        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK

def draw_bounding_boxes(image, obj_meta, confidence):
    confidence = '{0:.2f}'.format(confidence)
    rect_params = obj_meta.rect_params
    top = int(rect_params.top)
    left = int(rect_params.left)
    width = int(rect_params.width)
    height = int(rect_params.height)
    obj_name = pgie_classes_str[obj_meta.class_id]
    # image = cv2.rectangle(image, (left, top), (left + width, top + height), (0, 0, 255, 0), 2, cv2.LINE_4)
    color = (0, 0, 255, 0)
    w_percents = int(width * 0.05) if width > 100 else int(width * 0.1)
    h_percents = int(height * 0.05) if height > 100 else int(height * 0.1)
    linetop_c1 = (left + w_percents, top)
    linetop_c2 = (left + width - w_percents, top)
    image = cv2.line(image, linetop_c1, linetop_c2, color, 6)
    linebot_c1 = (left + w_percents, top + height)
    linebot_c2 = (left + width - w_percents, top + height)
    image = cv2.line(image, linebot_c1, linebot_c2, color, 6)
    lineleft_c1 = (left, top + h_percents)
    lineleft_c2 = (left, top + height - h_percents)
    image = cv2.line(image, lineleft_c1, lineleft_c2, color, 6)
    lineright_c1 = (left + width, top + h_percents)
    lineright_c2 = (left + width, top + height - h_percents)
    image = cv2.line(image, lineright_c1, lineright_c2, color, 6)
    # Note that on some systems cv2.putText erroneously draws horizontal lines across the image
    image = cv2.putText(image, obj_name + ',C=' + str(confidence), (left - 10, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 0, 255, 0), 2)
    return image

def publish_mqtt_message(data):
    # Opens a new broker connection, publishes a single message, and
    # disconnects again; this runs once per detected object.
    client = mqtt.Client()
    client.connect(MQTT_BROKER, MQTT_PORT, 60)
    client.publish(MQTT_TOPIC, json.dumps(data))
    client.disconnect()

def cb_newpad(decodebin, decoder_src_pad, data):
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)

    print("gstname=", gstname)
    if gstname.find("video") != -1:
        print("features=", features)
        if features.contains("memory:NVMM"):
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write("Error: Decodebin did not pick nvidia decoder plugin.\n")

def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)

    if ts_from_rtsp:
        if name.find("source") != -1:
            pyds.configure_source_for_ntp_sync(hash(Object))

def create_source_bin(index, uri):
    print("Creating source bin")

    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write("Unable to create source bin \n")

    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write("Unable to create uri decode bin \n")
    uri_decode_bin.set_property("uri", uri)
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)

    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write("Failed to add ghost pad in source bin \n")
        return None
    return nbin

def main(args):
    number_sources = len(args)

    global platform_info
    platform_info = PlatformInfo()
    Gst.init(None)

    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write("Unable to create Pipeline \n")
    print("Creating streamux \n ")

    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write("Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.request_pad_simple(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    if gie == "nvinfer":
        pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    else:
        pgie = Gst.ElementFactory.make("nvinferserver", "primary-inference")
    if not pgie:
        sys.stderr.write("Unable to create pgie \n")
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write("Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write("Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write("Unable to create nvosd \n")
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write("Unable to create nvvidconv_postosd \n")

    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write("Unable to create encoder")
    encoder.set_property("bitrate", bitrate)
    if platform_info.is_integrated_gpu():
        encoder.set_property("preset-level", 1)
        encoder.set_property("insert-sps-pps", 1)

    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write("Unable to create rtppay")

    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write("Unable to create udpsink")
    
    sink.set_property("host", "224.224.255.255")
    sink.set_property("port", updsink_port_num)
    sink.set_property("async", False)
    sink.set_property("sync", 1)

    streammux.set_property("width", MUXER_OUTPUT_WIDTH)
    streammux.set_property("height", MUXER_OUTPUT_HEIGHT)
    streammux.set_property("batch-size", number_sources)
    streammux.set_property("batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC)
    
    if ts_from_rtsp:
        streammux.set_property("attach-sys-ts", 0)

    if gie=="nvinfer":
        pgie.set_property("config-file-path", PGIE_CONFIG_FILE)
    else:
        pgie.set_property("config-file-path", "dstest1_pgie_inferserver_config.txt")

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)

    streammux.link(pgie)
    pgie.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)

    print("Linking elements in the Pipeline \n")

    tiler.set_property("rows", int(math.sqrt(number_sources)))
    tiler.set_property("columns", int(math.ceil((1.0 * number_sources) / tiler.get_property("rows"))))
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    print("Creating RTSP Streaming Server \n")
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % RTSP_PORT
    server.attach(None)

    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch("( udpsrc name=pay0 port=%d caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )" % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory(MOUNT_POINT, factory)
    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d%s ***\n\n" % (RTSP_PORT, MOUNT_POINT))

    print("Starting pipeline \n")
    loop = GLib.MainLoop()

    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    pgie_src_pad = pgie.get_static_pad("src")
    if not pgie_src_pad:
        sys.stderr.write("Unable to get src pad\n")
    else:
        pgie_src_pad.add_probe(Gst.PadProbeType.BUFFER, pgie_src_pad_buffer_probe, 0)

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property("live-source", 1)

    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass

    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("input_uri", nargs='+', help="URI of the input streams")
    parser.add_argument("--gie", default="nvinfer", choices=["nvinfer", "nvinferserver"], help="Type of GIE (default: nvinfer)")
    parser.add_argument("--codec", default="H264", choices=["H264", "H265"], help="Codec type (default: H264)")
    parser.add_argument("--bitrate", type=int, default=4000000, help="Bitrate of encoding (default: 4000000)")
    parser.add_argument("--rtsp-port", type=int, default=8554, help="Port for RTSP (default: 8554)")
    parser.add_argument("--mount-point", default="/ds-test", help="Mount point for RTSP (default: /ds-test)")
    parser.add_argument("--ts-from-rtsp", action='store_true', help="Extract NTP timestamps from RTSP")
    args = parser.parse_args()

    global gie
    global codec
    global bitrate
    global RTSP_PORT
    global MOUNT_POINT
    global ts_from_rtsp

    gie = args.gie
    codec = args.codec
    bitrate = args.bitrate
    RTSP_PORT = args.rtsp_port
    MOUNT_POINT = args.mount_point
    ts_from_rtsp = args.ts_from_rtsp

    main(args.input_uri)

Do you mean you are using a player to play the output RTSP stream, and the player stopped rendering?

It seems you are using a third-party lib. Please add some logs in publish_mqtt_message to check whether the app hangs in this function. We also suggest using the ready-made broker-sending solution; please refer to deepstream_test_4.py.
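For reference, a minimal sketch of the element setup that deepstream_test_4.py uses for the ready-made path; note your full code already defines proto_lib, conn_str, MSCONV_CONFIG_FILE and schema_type but never uses them. The property names below come from the sample; the tee/queue wiring that splits the pipeline between the broker branch and the video branch is omitted here:

msgconv = Gst.ElementFactory.make("nvmsgconv", "nvmsg-converter")
msgconv.set_property("config", MSCONV_CONFIG_FILE)
msgconv.set_property("payload-type", schema_type)

msgbroker = Gst.ElementFactory.make("nvmsgbroker", "nvmsg-broker")
msgbroker.set_property("proto-lib", proto_lib)   # libnvds_mqtt_proto.so
msgbroker.set_property("conn-str", conn_str)     # "localhost;1883;deepstream/notify"
msgbroker.set_property("sync", False)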

Yes, the stream stops when I use a player to watch it. The app works when I only send the message for the detected object; however, it doesn’t work properly when I try to include the image data in the message.

  1. Using a third-party lib to send to the broker would be outside of DeepStream. You can add a log in publish_mqtt_message to check whether the app hangs and whether the return value of client.publish is correct (see the sketch after this list).
  2. The C++ demo /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test4/ already supports sending the image to the broker. Please refer to “3. Send the image by the broker” in the README, then port the logic.
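Not the DeepStream broker API, just a minimal logging sketch for point 1, assuming the same MQTT_BROKER / MQTT_PORT / MQTT_TOPIC globals as your code. It times the call and prints paho-mqtt’s return code so a hang or a failed publish becomes visible:

import json
import time
import paho.mqtt.client as mqtt

def publish_mqtt_message(data):
    start = time.time()
    payload = json.dumps(data)
    client = mqtt.Client()
    client.connect(MQTT_BROKER, MQTT_PORT, 60)
    info = client.publish(MQTT_TOPIC, payload)
    # rc == mqtt.MQTT_ERR_SUCCESS (0) means the message was queued by the client
    print("publish rc =", info.rc, "payload bytes =", len(payload),
          "elapsed = %.3f s" % (time.time() - start))
    client.disconnect()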

I referred to deepstream_imagedata-multistream.py, fixed my code, and encountered an error.

I tried to apply tiler_sink_pad_buffer_probe and added the related code.
Here’s the full code of tiler_sink_pad_buffer_probe:

def tiler_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        obj_counter = {
            PGIE_CLASS_ID_PEOPLE_WITH_HELMET: 0,
            PGIE_CLASS_ID_PEOPLE_WITH_VEST: 0,
            PGIE_CLASS_ID_PEOPLE_NO_HELMET: 0,
            PGIE_CLASS_ID_PEOPLE_NO_VEST: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            # Periodically check for objects with borderline confidence value that may be false positive detections.
            # If such detections are found, annotate the frame with bboxes and confidence value.
            # Save the annotated frame to file.
            if saved_count["stream_{}".format(frame_meta.pad_index)] % 30 == 0 and (
                    MIN_CONFIDENCE < obj_meta.confidence < MAX_CONFIDENCE):
                if is_first_obj:
                    is_first_obj = False
                    # Getting Image data using nvbufsurface
                    # the input should be address of buffer and batch_id
                    n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
                    n_frame = draw_bounding_boxes(n_frame, obj_meta, obj_meta.confidence)
                    # convert python array into numpy array format in the copy mode.
                    frame_copy = np.array(n_frame, copy=True, order='C')
                    # convert the array into cv2 default color format
                    frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGRA)
                    if platform_info.is_integrated_gpu():
                        # If Jetson, since the buffer is mapped to CPU for retrieval, it must also be unmapped.
                        # The unmap call should be made after operations on the original array are complete;
                        # the original array cannot be accessed after this call.
                        pyds.unmap_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)

                save_image = True

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        print("Frame Number=", frame_number, "Number of Objects=", num_rects, "pnh_count=",
              obj_counter[PGIE_CLASS_ID_PEOPLE_NO_HELMET], "pnv_count=", obj_counter[PGIE_CLASS_ID_PEOPLE_NO_VEST])
        # update frame rate through this probe
        stream_index = "stream{0}".format(frame_meta.pad_index)
        global perf_data
        perf_data.update_fps(stream_index)
        if save_image:
            img_path = "{}/stream_{}/frame_{}.jpg".format("frames", frame_meta.pad_index, frame_number)
            cv2.imwrite(img_path, frame_copy)
        saved_count["stream_{}".format(frame_meta.pad_index)] += 1
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK

How can I fix the error?

Did you define the global variable saved_count? Using the unmodified deepstream_imagedata-multistream.py, can you reproduce the KeyError: 'stream_0'? If not, please compare the unmodified code with your custom code to narrow down the issue.
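For reference, a minimal sketch of how the unmodified sample seeds the counters: every "stream_<pad_index>" key must exist before the probe runs, otherwise the saved_count["stream_{}".format(frame_meta.pad_index)] lookup raises KeyError. The number_sources value here is hypothetical:

# Done in main() before the pipeline goes to PLAYING
saved_count = {}
number_sources = 2  # e.g., two input URIs

for i in range(number_sources):
    saved_count["stream_{}".format(i)] = 0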

Yes, I defined saved_count.
I tried the unmodified deepstream_imagedata-multistream.py and encountered another error.

I also tried the gst-launch-1.0 videotestsrc ! nv3dsink command and it shows:

As a workaround, you can use fakesink instead. Is this still a DeepStream issue to support? Thanks!
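That is, replacing nv3dsink with fakesink in the test command:

gst-launch-1.0 videotestsrc ! fakesink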

Problem solved. Thanks for your help!
