License plate detection is not detecting plates for all vehicles

Please provide complete information as applicable to your setup.

• Hardware Platform GPU
• DeepStream Version 7.0
• TensorRT Version 10.6.0
• NVIDIA GPU Driver Version 560.35.03

Description:
I’m trying to use TrafficCamNet for detecting vehicles; for each detected vehicle I’m running the license plate detection model.

Problem:
The license plate detection model is not detecting plates for all cars.

Example:


PGIE: (Trafficcamnet)

[property]

gpu-id=0
net-scale-factor=0.00392156862745098

onnx-file=trafficcamnet_models/resnet18_trafficcamnet_pruned.onnx

labelfile-path=trafficcamnet_models/labels.txt

model-engine-file=trafficcamnet_models/resnet18_trafficcamnet_pruned.onnx_b1_gpu0_fp32.engine

process-mode=1

model-color-format=0

## 0=FP32, 1=INT8, 2=FP16 mode
network-mode=0

num-detected-classes=4

interval=0

gie-unique-id=1

cluster-mode=2
 
[class-attrs-all]
pre-cluster-threshold=0.3
topk=300
nms-iou-threshold=0.5

SGIE2: (license plate)

[property]

gpu-id=0
net-scale-factor=0.00392156862745098

onnx-file=lpdnet_model/LPDNet_usa_pruned_tao5.onnx

labelfile-path=lpdnet_model/labels_lpdnet.txt

model-engine-file=lpdnet_model/LPDNet_usa_pruned_tao5.onnx_b4_gpu0_fp32.engine

# int8-calib-file=models/resnet34_peoplenet_int8.txt

batch-size=16

## 0=FP32, 1=INT8, 2=FP16 mode
network-mode=0

model-color-format=1

## 0=detector 1=classifier 2=segmentation 3=instance segmentation
network-type=0

process-mode=2

interval=0

gie-unique-id=2

num-detected-classes=1

operate-on-gie-id=1

operate-on-class-ids=0

input-object-min-height=20
 	
input-object-min-width=20

cluster-mode=2
 
[class-attrs-all]
pre-cluster-threshold=0.3
topk=300
nms-iou-threshold=0.5

code:

import sys
sys.path.append('../')
import gi
import configparser
gi.require_version('Gst', '1.0')
gi.require_version("GstRtspServer", "1.0")
from gi.repository import GstRtspServer
from gi.repository import Gst, GLib
from gi.repository import GLib
from ctypes import *
import time
import sys
import pyds
import platform
from common.platform_info import PlatformInfo
import os
import pytz
from threading import Thread, Lock
from datetime import datetime
from zoneinfo import ZoneInfo
from urllib.parse import urlparse
from datetime import datetime, timedelta
import uuid
import json
import yaml



# On-screen-display settings applied to every nvdsosd instance in main().
OSD_PROCESS_MODE = 0
OSD_DISPLAY_TEXT = 1

# Intended nvstreammux output resolution (by variable name;
# NOTE(review): confirm these values are actually applied to the mux).
streammux_height = 1080
streammux_width = 1920

# Paths to the nvinfer (PGIE/SGIE) and nvtracker configuration files.
trafficcamnet_pgie_config = "configs/trafficcamnet_pgie_config.txt"
ldpnet_pgie_config = "configs/lpdnet_pgie_config.txt"  # NOTE: "ldpnet" looks like a typo for "lpdnet"
tracker_config_path = "configs/dsnvanalytics_tracker_config.txt"


def cb_newpad(decodebin, decoder_src_pad, data):
    """uridecodebin "pad-added" handler.

    When the newly exposed pad carries video in NVMM (GPU) memory,
    retarget the source bin's ghost "src" pad at it so downstream
    elements receive the decoded stream.
    """
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    if not caps:
        # Pad may not have negotiated caps yet; fall back to a query.
        caps = decoder_src_pad.query_caps()
    structure = caps.get_structure(0)
    media_type = structure.get_name()
    source_bin = data
    features = caps.get_features(0)
    print("gstname=", media_type)
    if "video" in media_type:
        print("features=", features)
        if features.contains("memory:NVMM"):
            # Retarget the bin's ghost pad onto the decoder's video pad.
            ghost_pad = source_bin.get_static_pad("src")
            if not ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")

def decodebin_child_added(child_proxy, Object, name, user_data):
    """uridecodebin "child-added" handler.

    Recursively watches nested decodebins, and enables drop-on-latency
    on the source element when that property exists (live RTSP input).
    """
    print("Decodebin child added:", name, "\n")
    if "decodebin" in name:
        # Watch the nested decodebin for its own children as well.
        Object.connect("child-added", decodebin_child_added, user_data)
    if "source" in name:
        source_element = child_proxy.get_by_name("source")
        if source_element.find_property('drop-on-latency') is not None:
            Object.set_property("drop-on-latency", True)

def create_source_bin(index, uri):
    """Wrap a uridecodebin in a Gst.Bin exposing a ghost "src" pad.

    The ghost pad is created without a target; cb_newpad retargets it
    once the decoder exposes its video pad. Returns the bin, or None if
    the ghost pad could not be added.
    """
    print("Creating source bin")
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    source_bin = Gst.Bin.new(bin_name)
    if not source_bin:
        sys.stderr.write(" Unable to create source bin \n")
    decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    decode_bin.set_property("uri", uri)
    decode_bin.connect("pad-added", cb_newpad, source_bin)
    decode_bin.connect("child-added", decodebin_child_added, source_bin)
    Gst.Bin.add(source_bin, decode_bin)
    ghost_pad = source_bin.add_pad(
        Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not ghost_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return source_bin


def bus_call(bus, message, loop):
    """GStreamer bus watch: quit the main loop on EOS or ERROR, log WARNINGs."""
    msg_type = message.type
    if msg_type == Gst.MessageType.EOS:
        sys.stdout.write("End-of-stream")
        loop.quit()
        return True
    if msg_type == Gst.MessageType.WARNING:
        warn, debug = message.parse_warning()
        sys.stderr.write("Warning: %s: %s" % (warn, debug))
        return True
    if msg_type == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write("Error: %s: %s" % (err, debug))
        loop.quit()
    return True

def extract_rtsp_details(rtsp_url):
    """Derive a per-camera identifier from an RTSP URL.

    Returns the last path segment when the URL has a non-trivial path,
    otherwise the port number as a string.
    """
    parsed = urlparse(rtsp_url)
    path = parsed.path
    if path and path != '/':
        return path.rsplit('/', 1)[-1]
    return f"{parsed.port}"

IST = pytz.timezone('Asia/Kolkata')  # all saved-image timestamps use IST

def create_dynamic_path(base_path, ip_port):
    """Build (and create on disk) a per-camera, per-minute output directory.

    Layout: base_path/ip_port/<YYYY>_year/<MM>_month/<DD>_day/<HH>_hour/<MM>_minute

    Args:
        base_path: root output directory.
        ip_port: camera identifier (last RTSP path segment or port).

    Returns:
        The created directory path (created with exist_ok=True).
    """
    # FIX: the original computed a naive datetime.now() and immediately
    # overwrote it with the IST-aware value; the redundant call is removed.
    now = datetime.now(IST)
    dynamic_path = os.path.join(
        base_path,
        ip_port,
        str(now.year) + "_year",
        f"{now.month:02d}_month",
        f"{now.day:02d}_day",
        f"{now.hour:02d}_hour",
        f"{now.minute:02d}_minute",
    )
    os.makedirs(dynamic_path, exist_ok=True)
    return dynamic_path

# Per-stream timestamp (time.time()) of the most recently saved frame.
last_saved_time = {}
# Minimum seconds between two saved images for the same stream.
save_img_time_interval = 1

def frame_filter_pad_probe(pad, info, user_data):
    """Buffer probe on each multifilesink sink pad.

    Rate-limits image saving to one frame per stream per
    save_img_time_interval seconds by retargeting the multifilesink's
    "location" property at a freshly built per-minute path.
    """
    global last_saved_time
    now_ts = time.time()
    stream_id = user_data["stream_id"]
    uri_name = user_data["uri_name"]
    previous_ts = last_saved_time.setdefault(stream_id, 0)
    if now_ts - previous_ts < save_img_time_interval:
        # Too soon since the last save for this stream — let the buffer pass.
        return Gst.PadProbeReturn.OK
    last_saved_time[stream_id] = now_ts
    cam_id_ip_port = extract_rtsp_details(uri_name)
    base_path = "/Deepstream_output/rtsp_v3"
    image_folder_path = create_dynamic_path(base_path, cam_id_ip_port)
    now = datetime.now(IST)
    image_name = (
        f"{now.year}_{now.month:02d}_{now.day:02d}"
        f"_{now.hour:02d}_{now.minute:02d}_{now.second:02d}.jpg"
    )
    image_save_path = os.path.join(image_folder_path, image_name)
    print('image_save_path', image_save_path)
    # Point the multifilesink at the new file name for this buffer.
    pad.get_parent_element().set_property("location", image_save_path)
    return Gst.PadProbeReturn.OK

def make_element(element_name, i):
    """Create a GStreamer element of factory type *element_name* and
    rename it with an index suffix; logs to stderr on failure."""
    elem = Gst.ElementFactory.make(element_name, element_name)
    if not elem:
        sys.stderr.write(" Unable to create {0}".format(element_name))
    elem.set_property("name", "{0}-{1}".format(element_name, str(i)))
    return elem


def sgie_probe(pad, info, u_data):
    """Buffer probe on the SGIE sink pad.

    Walks the NvDs batch metadata and logs every frame number plus the
    object id / class id of each detected object. Purely diagnostic;
    the buffer always passes through unchanged.
    """
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer")
        return Gst.PadProbeReturn.OK

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    # frame_meta_list is a raw GList; traverse it manually via .next.
    frame_node = batch_meta.frame_meta_list
    while frame_node is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(frame_node.data)
        except StopIteration:
            break

        print(f"Processing Frame Number: {frame_meta.frame_num}")

        # Walk this frame's detected objects the same way.
        obj_node = frame_meta.obj_meta_list
        while obj_node is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(obj_node.data)
                print(f"Object ID: {obj_meta.object_id}, Class ID: {obj_meta.class_id}")
            except StopIteration:
                break
            obj_node = obj_node.next

        frame_node = frame_node.next

    return Gst.PadProbeReturn.OK



def main(args):
    """Build and run the DeepStream pipeline.

    Topology:
        N x uridecodebin source bins -> nvstreammux -> PGIE (TrafficCamNet)
        -> nvtracker -> SGIE (LPDNet, on PGIE car objects) -> nvstreamdemux
        -> per-stream [queue -> nvvideoconvert -> nvdsosd -> queue
                       -> nvvideoconvert -> nvjpegenc -> multifilesink]

    Args:
        args: sys.argv — args[1:] are the input URIs, one source per URI.
    """
    global loop, platform_info

    print('args', args, len(args))

    num_sources = len(args) - 1
    print("num_sources", num_sources)

    platform_info = PlatformInfo()
    Gst.init(None)

    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline ")

    print("Creating streammux")
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write("Unable to create NvStreamMux ")
    # FIX: the module-level streammux_width/streammux_height constants were
    # never applied; nvstreammux needs an output resolution configured.
    streammux.set_property("width", streammux_width)
    streammux.set_property("height", streammux_height)
    streammux.set_property("batched-push-timeout", 40000)
    streammux.set_property("batch-size", num_sources)
    pipeline.add(streammux)

    # FIX: is_live was assigned but never initialized nor used; initialize it
    # and use it below to configure the mux for live (RTSP) sources.
    is_live = False
    for i in range(num_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.request_pad_simple(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    if is_live:
        # Tell the mux to timestamp/handle buffers as a live source.
        streammux.set_property("live-source", 1)

    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    queue3 = Gst.ElementFactory.make("queue", "queue3")
    queue4 = Gst.ElementFactory.make("queue", "queue4")

    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")

    tracker = Gst.ElementFactory.make("nvtracker", "tracker")

    sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")

    nvstreamdemux = Gst.ElementFactory.make("nvstreamdemux", "nvstreamdemux")

    pgie.set_property('config-file-path', trafficcamnet_pgie_config)
    pgie.set_property("batch-size", num_sources)

    sgie1.set_property('config-file-path', ldpnet_pgie_config)
    # Run the SGIE only on objects produced by the PGIE (gie-unique-id=1).
    sgie1.set_property('infer-on-gie-id', 1)

    # FIX: add elements one at a time — the multi-argument Gst.Bin.add()
    # overload is not available in all PyGObject versions.
    for element in (queue1, queue2, queue3, queue4,
                    pgie, sgie1, nvstreamdemux, tracker):
        pipeline.add(element)

    # Transfer the [tracker] section of the config file onto the element.
    config = configparser.ConfigParser()
    config.read(tracker_config_path)

    for key in config['tracker']:
        if key == 'tracker-width':
            tracker.set_property('tracker-width', config.getint('tracker', key))
        if key == 'tracker-height':
            tracker.set_property('tracker-height', config.getint('tracker', key))
        if key == 'gpu-id':
            tracker.set_property('gpu_id', config.getint('tracker', key))
        if key == 'll-lib-file':
            tracker.set_property('ll-lib-file', config.get('tracker', key))
        if key == 'll-config-file':
            tracker.set_property('ll-config-file', config.get('tracker', key))

    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(sgie1)
    sgie1.link(queue3)
    queue3.link(nvstreamdemux)

    # Diagnostic probe: log what the SGIE actually receives per frame.
    sgie_sink_pad = sgie1.get_static_pad("sink")
    if not sgie_sink_pad:
        sys.stderr.write("Unable to get SGIE sink pad \n")
    else:
        sgie_sink_pad.add_probe(Gst.PadProbeType.BUFFER, sgie_probe, None)

    # Per-stream output branch: demux -> OSD -> JPEG -> multifilesink.
    for i in range(num_sources):

        queue5 = Gst.ElementFactory.make("queue", f"queue5_{i}")
        nvvideoconvert = Gst.ElementFactory.make("nvvideoconvert", f"nvvideoconvert_{i}")
        nvdsosd = Gst.ElementFactory.make("nvdsosd", f"nvdsosd_{i}")
        queue6 = Gst.ElementFactory.make("queue", f"queue6_{i}")
        nvvidconv2 = Gst.ElementFactory.make("nvvideoconvert", f"nvvideoconvert2_{i}")
        jpegenc = Gst.ElementFactory.make("nvjpegenc", f"jpeg-encoder_{i}")
        multifilesink = Gst.ElementFactory.make("multifilesink", f"multi-file-sink_{i}")

        for element in (queue5, nvvideoconvert, nvdsosd, queue6,
                        nvvidconv2, jpegenc, multifilesink):
            pipeline.add(element)

        nvdsosd.set_property("process-mode", OSD_PROCESS_MODE)
        nvdsosd.set_property("display-text", OSD_DISPLAY_TEXT)

        padname = "src_%u" % i
        demuxsrcpad = nvstreamdemux.request_pad_simple(padname)
        if not demuxsrcpad:
            sys.stderr.write("Unable to create demux src pad \n")

        queuesinkpad = queue5.get_static_pad("sink")
        if not queuesinkpad:
            sys.stderr.write("Unable to create queue sink pad \n")
        demuxsrcpad.link(queuesinkpad)

        multifilesink.set_property("post-messages", True)

        queue5.link(nvvideoconvert)
        nvvideoconvert.link(nvdsosd)
        nvdsosd.link(queue6)
        queue6.link(nvvidconv2)
        nvvidconv2.link(jpegenc)
        jpegenc.link(multifilesink)

        uri_name = args[i + 1]

        probe_data = {
            "stream_id": i,
            "uri_name": uri_name,
        }

        # Rate-limiting probe that also retargets the sink's output path.
        sinkpad = multifilesink.get_static_pad("sink")
        if not sinkpad:
            sys.stderr.write("Unable to get sink pad of multifilesink \n")
            sys.exit(1)

        sinkpad.add_probe(Gst.PadProbeType.BUFFER, frame_filter_pad_probe, probe_data)

    loop = GLib.MainLoop()

    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    pipeline.set_state(Gst.State.PAUSED)

    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline ")

    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except BaseException:
        # FIX: bare "except:" made explicit; BaseException still covers
        # KeyboardInterrupt so Ctrl-C shuts the pipeline down cleanly.
        print("Stopping pipeline")

    print("Exiting app")
    pipeline.set_state(Gst.State.NULL)



if __name__ == '__main__':
    # Entry point: argv[1:] are the input URIs, one source bin per URI.
    main(sys.argv)

The LPDNet_usa_pruned_tao5.onnx model is trained with the California (USA) car license plate dataset. Are your license plates California car plates?

yes
license plates are similar to California car plates

Can you try the sample deepstream_tao_apps/apps/tao_others/deepstream_lpr_app at master · NVIDIA-AI-IOT/deepstream_tao_apps?

There has been no update from you for a while, so we assume this is no longer an issue and are closing this topic. If you need further support, please open a new one. Thanks.