I want to display DeepStream output in a GUI framework

Hello
I would like to use DeepStream’s Python bindings to display inference results in Flet, but it seems that the output of nvdsosd is not being converted properly. I have specified the BGR format, but the conversion result looks like the attached image. Please give me some advice.
ds_flet_test_2.zip (2.3 KB)

I think there are some problems with your pipeline. The following is the correct code. I don’t have a USB camera, so I used filesrc to simulate v4l2src.

Use this command line to generate out.yuv, and then run the following code.

ffmpeg -ss 00:00 -i /opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.mp4 -pix_fmt yuyv422 -r 30 -s 640x480 out.yuv
import sys
import gi
import threading
import flet as ft
import base64
import cv2
import numpy as np

gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst

Gst.init(None)

def main(page: ft.Page):
    page.title = "Camera Feed"

    image_box = ft.Image(width=640, height=480)
    video_container = ft.Container(image_box, alignment=ft.alignment.center, expand=True)
    page.add(ft.Row([video_container]))

    def bus_call(bus, message, loop):
        t = message.type
        if t == Gst.MessageType.ERROR:
            err, debug = message.parse_error()
            print("Error:", err, debug)  # print the error message
            loop.quit()
        elif t == Gst.MessageType.EOS:
            loop.quit()
        return True

    def update_images():

        # Standard GStreamer initialization
        Gst.init(None)

        # Create the GStreamer pipeline
        pipeline = Gst.Pipeline()

        # Load the input source
        # source = Gst.ElementFactory.make("v4l2src", "source")
        # source.set_property("device", "/dev/video0")

        source = Gst.ElementFactory.make("filesrc", "source")
        source.set_property("location", "out.yuv")

        # Caps filter for the source
        # filter = Gst.ElementFactory.make("capsfilter", "filter")
        # caps = Gst.Caps.from_string("video/x-raw, format=YUY2, width=640, height=480, framerate=30/1")
        # filter.set_property("caps", caps)

        videoparse = Gst.ElementFactory.make("videoparse", "videoparse")
        videoparse.set_property("format", 4)
        videoparse.set_property("width", 640)
        videoparse.set_property("height", 480)
        videoparse.set_property("framerate", Gst.Fraction(30, 1))

        # Convert the camera-format video into a format that can be displayed on screen
        # videoconvert = Gst.ElementFactory.make("videoconvert", "convert")
        # videoconvert2 = Gst.ElementFactory.make("videoconvert", "convert2")

        # Create the element that converts the memory type of the input buffers
        nvconvertsrc = Gst.ElementFactory.make("nvvideoconvert", "nvconvertersrc")

        # Create the caps filter that constrains the memory-type conversion
        caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
        caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))

        # Create the element that batches multiple streams together
        streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
        # Configure the output stream
        streammux.set_property('width', 640)
        streammux.set_property('height', 480)
        streammux.set_property('batch-size', 1)

        # Run the trained model
        # Point the inference engine at its configuration file
        pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")

        pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt")
        # pgie.set_property('config-file-path', "Primary_Detector/dstest1_pgie_config.txt")

        # Convert into a format that can be displayed on screen
        nvvidconvert = Gst.ElementFactory.make("nvvideoconvert", "nvconverter")
        nvvidconvert2 = Gst.ElementFactory.make("nvvideoconvert", "nvconverter2")
        nvvidconvert2.set_property("compute-hw", 1)

        # raw_filter = Gst.ElementFactory.make("capsfilter", "raw_filter")
        # raw_filter.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:RAW)"))

        # Draw the inference results onto the video
        nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")

        # Caps filter to handle the data as BGRx
        # bgrx_filter = Gst.ElementFactory.make("capsfilter", "rgbx_filter")
        # bgrx_caps = Gst.Caps.from_string("video/x-raw, format=(string)BGRx")
        # bgrx_filter.set_property("caps", bgrx_caps)

        # Caps filter to handle the data as BGR
        bgr_filter = Gst.ElementFactory.make("capsfilter", "rgb_filter")
        bgr_caps = Gst.Caps.from_string("video/x-raw, format=BGR")
        bgr_filter.set_property("caps", bgr_caps)

        # Sink that hands the media data to the application
        appsink = Gst.ElementFactory.make("appsink", "appsink")
        appsink.set_property("emit-signals", True)
        appsink.set_property("max-buffers", 1)
        appsink.set_property("drop", True)

        pipeline.add(source)
        pipeline.add(videoparse)
        # pipeline.add(filter)
        # pipeline.add(videoconvert)
        # pipeline.add(videoconvert2)
        pipeline.add(nvconvertsrc)
        pipeline.add(caps_vidconvsrc)
        pipeline.add(streammux)
        pipeline.add(pgie)
        pipeline.add(nvvidconvert)
        # pipeline.add(nvvidconvert2)
        pipeline.add(nvosd)
        pipeline.add(nvvidconvert2)
        # pipeline.add(raw_filter)
        # pipeline.add(bgrx_filter)
        pipeline.add(bgr_filter)
        pipeline.add(appsink)

        source.link(videoparse)
        # filter.link(videoconvert)
        # videoconvert.link(nvconvertsrc)
        videoparse.link(nvconvertsrc)
        nvconvertsrc.link(caps_vidconvsrc)

        sinkpad = streammux.get_request_pad("sink_0")
        if not sinkpad:
            sys.stderr.write(" Unable to get the sink pad of streammux \n")
        srcpad = caps_vidconvsrc.get_static_pad("src")
        if not srcpad:
            sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
        srcpad.link(sinkpad)

        streammux.link(pgie)
        pgie.link(nvvidconvert)
        nvvidconvert.link(nvosd)
        nvosd.link(nvvidconvert2)
        nvvidconvert2.link(bgr_filter)
        # bgrx_filter.link(videoconvert2)
        # videoconvert2.link(bgr_filter)
        bgr_filter.link(appsink)

        def on_new_sample(appsink):
            sample = appsink.emit("pull-sample")
            if sample:
                buffer = sample.get_buffer()
                # gst_memory = buffer.get_memory(0)
                # # Check whether the buffer is in NVMM memory
                # is_nvmm = gst_memory.is_type("nvmm")
                # print("Is NVMM Memory:", is_nvmm)
                success, map_info = buffer.map(Gst.MapFlags.READ)
                if success:
                    # Wrap the mapped BGR buffer as a 640x480 image, JPEG-encode it,
                    # and push it to the Flet image control as a base64 string
                    array = np.ndarray((480, 640, 3), dtype=np.uint8, buffer=map_info.data)
                    jpg_img = cv2.imencode('.jpg', array)[1]
                    b64_string = base64.b64encode(jpg_img).decode('utf-8')
                    image_box.src_base64 = b64_string
                    page.update()
                    # print("Buffer size:", map_info.size)
                    # cv2.imwrite("output.jpg", array)  # save as an image file
                    buffer.unmap(map_info)
            return Gst.FlowReturn.OK


        appsink.connect("new-sample", on_new_sample)
        pipeline.set_state(Gst.State.PLAYING)

        loop = GLib.MainLoop()
        bus = pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", bus_call, loop)
        loop.run()

        pipeline.set_state(Gst.State.NULL)

    threading.Thread(target=update_images, daemon=True).start()

if __name__ == "__main__":
    ft.app(target=main)

Regarding the use of v4l2src in DeepStream, refer to this FAQ.
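In case the FAQ is not at hand, a v4l2src-based DeepStream pipeline typically looks roughly like the command below; the device path, resolution, and the YUY2 camera format are assumptions, so adjust them to whatever your camera actually reports.

gst-launch-1.0 v4l2src device=/dev/video0 ! video/x-raw,format=YUY2,width=640,height=480,framerate=30/1 ! nvvideoconvert ! "video/x-raw(memory:NVMM), format=NV12" ! m.sink_0 nvstreammux name=m width=640 height=480 batch-size=1 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt ! nvvideoconvert ! nvdsosd ! nv3dsink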

You can also use gst-launch-1.0 to debug the pipeline

gst-launch-1.0 -e filesrc location=out.yuv ! videoparse format=4 width=640 height=480 framerate=30/1 ! nvvideoconvert ! "video/x-raw(memory:NVMM), format=NV12" ! m.sink_0 nvstreammux name=m width=640 height=480 batch-size=1 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt ! nvvideoconvert ! nvdsosd ! nvvideoconvert ! "video/x-raw, format=RGBA" ! appsink
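If it is easier, the same pipeline string can also be built from Python with Gst.parse_launch instead of creating and linking each element by hand. This is only a sketch mirroring the command line above, with the appsink given a name so that the "new-sample" callback from the earlier code can be attached to it.

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)

# Same pipeline as the gst-launch-1.0 command, ending in a named appsink
pipeline = Gst.parse_launch(
    "filesrc location=out.yuv ! videoparse format=4 width=640 height=480 framerate=30/1 ! "
    "nvvideoconvert ! video/x-raw(memory:NVMM),format=NV12 ! m.sink_0 "
    "nvstreammux name=m width=640 height=480 batch-size=1 ! "
    "nvinfer config-file-path=/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt ! "
    "nvvideoconvert ! nvdsosd ! nvvideoconvert compute-hw=1 ! video/x-raw,format=BGR ! "
    "appsink name=sink emit-signals=true max-buffers=1 drop=true")

# Fetch the appsink by name, then connect the "new-sample" handler as in the full example
appsink = pipeline.get_by_name("sink")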

I have successfully displayed the output of nvdsosd on the screen with nv3dsink. I think it looks like the image you get when converting BGRA to BGR.
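That symptom is what typically appears when a 4-byte-per-pixel buffer (RGBA/BGRA) is wrapped as a 3-byte BGR array, so every row is read with the wrong stride. A sketch of handling it, assuming the appsink caps are "video/x-raw, format=RGBA" as in the gst-launch command above and that map_info comes from buffer.map() as in the earlier code:

import cv2
import numpy as np

def rgba_buffer_to_bgr(map_info, width=640, height=480):
    # Read the mapped buffer as 4-channel RGBA, then drop alpha and reorder to BGR
    rgba = np.ndarray((height, width, 4), dtype=np.uint8, buffer=map_info.data)
    return cv2.cvtColor(rgba, cv2.COLOR_RGBA2BGR)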

OK, the code above can be displayed on the GUI using BGR; I fixed the problem in your code.

The image looks like this.

Follow this FAQ to check your v4l2 camera. The code above is just a simulation using a YUY2 file.
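The formats your camera actually outputs can be listed with v4l2-ctl from the v4l-utils package (the device path is an assumption):

v4l2-ctl --device=/dev/video0 --list-formats-ext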

I ran it using the YUY2 file.

Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU)
• DeepStream Version
• JetPack Version (valid for Jetson only)
• TensorRT Version
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type (questions, new requirements, bugs)
• How to reproduce the issue? (This is for bugs. Include which sample app is being used, the configuration file content, the command line used, and other details for reproducing.)
• Requirement details (This is for new requirements. Include the module name, i.e. which plugin or which sample application, and the function description.)
• The pipeline being used

I tried it on Orin with DS-6.4. Are you using a dGPU?

Sorry, I didn’t provide complete information.
I am using a Jetson Orin Nano Developer Kit (8GB).
JetPack version is 6.0.
DeepStream version is 6.4.
TensorRT version is 8.6.2.3.
I am trying to perform inference on USB camera footage in real time and display the results in a GUI application using Flet.
Screen display with nv3dsink is successful. I’m trying to change nv3dsink to appsink and display it on a GUI app.
However, the image displayed is not the desired result.
The input does not need to be a USB camera. The purpose is to display DeepStream inference results on a GUI application.

No, I’m using a Jetson Orin Nano.

Be careful with the Orin Nano you are using: are you using a software encoder? Due to a bug in DS-6.4, the software encoder is currently unavailable on the Orin Nano.

Since I don’t have an Orin Nano, the code I provided above was only tested on an AGX Orin.

So what pipeline are you currently using? First make sure your pipeline runs successfully with gst-launch-1.0.

No software encoder is used.
After nvdsosd, when I change the memory format with nvvideoconvert and display the video with autovideosink, I get a warning that a large number of video buffers are being dropped.
Could the GUI display be failing because the processing speed of the Orin Nano is insufficient?

Warning: gst-core-error-quark: A lot of buffers are being dropped. (13): …/libs/gst/base/gstbasesink.c(3143): gst_base_sink_is_too_late (): /GstPipeline:pipeline0/GstAutoVideoSink:file_output/GstXvImageSink:file_output-actual-sink-xvimage:
There may be a timestamping problem, or this computer is too slow.
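For a GUI preview, one thing worth trying against that warning is disabling clock synchronization on the sink so that “late” frames are rendered instead of dropped. A sketch, with the caveat that disabling sync trades timing accuracy for smoother display:

# Render frames as soon as they arrive instead of dropping late ones
appsink.set_property("sync", False)
# The same "sync" property also exists on autovideosink and nv3dsink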

Please share error logs based on the steps and code I have provided here.

Since I don’t have an Orin Nano, I am using an AGX Orin; without a hardware encoder they should behave the same.

I tried it with an AGX Orin on JetPack 5.0.1 and DeepStream 6.1, and it worked without any problems.
It also worked with JetPack 5.1.3 and DeepStream 6.3 on an Orin Nano.
However, with JetPack 6.0 and DeepStream 6.4 on the Orin Nano, the results are not the same.
What is the cause?

I use an AGX Orin with JP-6.0 and DS-6.4, and it works fine.

You can try replacing appsink with glimagesink; this may be a problem with Flutter.
If the following command line works normally, it means that this is not a problem with DeepStream.

ffmpeg -ss 00:00 -i /opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.mp4 -pix_fmt yuyv422 -r 30 -s 640x480 out.yuv


gst-launch-1.0 -e filesrc location=out.yuv ! videoparse format=4 width=640 height=480 framerate=30/1 ! nvvideoconvert ! "video/x-raw(memory:NVMM), format=NV12" ! m.sink_0 nvstreammux name=m width=640 height=480 batch-size=1 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt ! nvvideoconvert ! nvdsosd ! nvvideoconvert compute-hw=1 ! "video/x-raw, format=BGR" ! glimagesink

There has been no update from you for a while, so we assume this is no longer an issue and are closing this topic. If you need further support, please open a new one. Thanks.

