Having trouble blurring faces in DeepStream 7.1

Please provide complete information as applicable to your setup.

• Hardware Platform: Jetson Orin Nano
• DeepStream Version: 7.1.0
• JetPack Version: 6.1
• TensorRT Version: 10.3.0.30-1+cuda12.5
• NVIDIA GPU Driver Version: N/A (Jetson)
• Issue Type: Question

Hi,

So I am trying to create a DeepStream application using the Python bindings: it uses PeopleNet as the PGIE to first detect a person, then PeopleNet again as an SGIE to detect the face, and finally blurs that face.
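For context, the code listing below only includes the PGIE stage; the face-detection SGIE would be a second nvinfer instance sitting between the PGIE and the video converter. A rough sketch of what I mean (the config file name is just a placeholder; in that config, process-mode=2 and operate-on-gie-id=1 make it run on the PGIE's detections):

sgie = Gst.ElementFactory.make("nvinfer", "secondary-inference")
sgie.set_property('config-file-path', "dstest_sgie_face_config.txt")  # placeholder name
pipeline.add(sgie)
# linked between the PGIE and the converter: pgie -> sgie -> nvvidconv
pgie.link(sgie)
sgie.link(nvvidconv)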

I used a sample created by someone at NVIDIA from a previous forum question: [deepstream-python] how to blur object using nvbufsurface (it was called deepstream_test_1_blur.py). This application worked, but it wasn't outputting the video with the blurring applied live; instead it just saved an image every 300 frames showing the blurred faces.

I had a project that made the face-detection bounding box black, and I thought I could adapt it using that test_1_blur solution to blur the faces instead. But I keep running into the issue of not being able to see the blurred faces on the OSD: it just shows the bounding boxes with no faces blurred, even though the terminal says it's blurring the face. I have tried everything under the sun and can't figure out why the blurred faces aren't showing.

I’ve attached my code below in case someone could take a look and figure out what’s going on. I’m using a .mp4 video.

import sys
sys.path.append('../')
import platform
import configparser

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call

import numpy as np
import cv2
import os
import pyds


PGIE_CLASS_ID_PERSON = 2



def blur_obj_pad_buffer_probe(pad, info, u_data):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer")
        return Gst.PadProbeReturn.OK

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number=frame_meta.frame_num
        # image_array: RGBA
        image_array = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
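        # NOTE: get_nvds_buf_surface returns a NumPy view backed by the mapped
        # NvBufSurface; both the copy=True below and the cvtColor call produce
        # new arrays that are detached from the buffer the pipeline renders.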
        frame_image_np = np.array(image_array, copy=True, order='C')
        frame_copy = cv2.cvtColor(frame_image_np, cv2.COLOR_RGBA2BGRA)

        l_obj = frame_meta.obj_meta_list

        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            # crop image
            x1 = int(obj_meta.rect_params.left)
            y1 = int(obj_meta.rect_params.top)
            x2 = int(obj_meta.rect_params.left + obj_meta.rect_params.width)
            y2 = int(obj_meta.rect_params.top + obj_meta.rect_params.height)
            roi = frame_copy[y1:y2, x1:x2]
            # applying a gaussian blur over this new rectangle area 
            roi = cv2.GaussianBlur(roi, (23, 23), 30) 
            # impose this blurred image on original image to get final image 
            frame_copy[y1:y1+roi.shape[0], x1:x1+roi.shape[1]] = roi

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        if frame_number % 300 == 0:
            filename = os.path.join('.', "frame_{}.png".format(frame_number))
            cv2.imwrite(filename, frame_copy)

        if is_aarch64():  # If Jetson, since the buffer is mapped to CPU for retrieval, it must also be unmapped
            # The unmap call should be made after operations with the original array are complete.
            pyds.unmap_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
            #  The original array cannot be accessed after this call.
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK


def main(args):
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)

    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    # Since the data format in the input file is elementary h264 stream,
    # we need a h264parser
    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")

    # Use nvdec_h264 for hardware accelerated decode on GPU
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Use a converter to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    caps = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter.set_property("caps", caps)

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")

    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output
    if is_aarch64():
        print("Creating nv3dsink \n")
        sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink")
        if not sink:
            sys.stderr.write(" Unable to create nv3dsink \n")
    else:
        print("Creating EGLSink \n")
        # sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
        sink = Gst.ElementFactory.make("fakesink", "nvvideo-renderer")
        if not sink:
            sys.stderr.write(" Unable to create egl sink \n")

    print("Playing file %s " % args[1])
    source.set_property('location', args[1])
    # Only set these properties if not using new gst-nvstreammux
    if os.environ.get('USE_NEW_NVSTREAMMUX') != 'yes':
        streammux.set_property('width', 1920)
        streammux.set_property('height', 1080)
        streammux.set_property('batched-push-timeout', 4000000)

    streammux.set_property('batch-size', 1)
    pgie.set_property('config-file-path', "dstest1_pgie_config.txt")

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(filter)
    pipeline.add(nvosd)
    pipeline.add(sink)

    # we link the elements together
    # file-source -> h264-parser -> nvh264-decoder ->
    # nvinfer -> nvvidconv -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(filter)
    filter.link(nvosd)
    nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Let's add a probe to get informed of the generated metadata. We add the
    # probe to the sink pad of the osd element, since by that time the buffer
    # will have all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, blur_obj_pad_buffer_probe, 0)

    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)


if __name__ == '__main__':
    sys.exit(main(sys.argv))

Fixed it!

Changed

frame_image_np = np.array(image_array, copy=True, order='C')

To

frame_image_np = np.array(image_array, copy=False, order='C')

I was only blurring copied frames, not the ones that were actually being output.
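In other words, the array returned by pyds.get_nvds_buf_surface is a view into the mapped NvBufSurface, so edits only show up downstream if they are written into that view rather than into a copy. A minimal sketch of the in-place pattern inside the probe (variable names as in the code above; in real code the coordinates should also be clamped to the frame size):

frame_image = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
# frame_image is RGBA and backed by the mapped surface, so slice assignment
# writes the blurred pixels straight into the buffer that nvdsosd renders.
roi = frame_image[y1:y2, x1:x2]
frame_image[y1:y2, x1:x2] = cv2.GaussianBlur(roi, (23, 23), 30)
pyds.unmap_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)

Since GaussianBlur operates per channel, no color conversion is needed just to blur, so the cvtColor step can be dropped entirely.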
