@AastaLLL Hi AastaLLL:
Sorry for the late reply — I was assigned to another project in the meantime.
- My RTSP Python program is based on the deepstream-test3 Python app; I only changed the nvinfer model and did not change the GStreamer pipeline.
Below is the pgie config file:
[property]
gpu-id=0
net-scale-factor=0.0039215697906911373
tlt-model-key=tlt_encode
tlt-encoded-model=resnet34_peoplenet_pruned.etlt
labelfile-path=peoplenet_labels.txt
model-engine-file=resnet34_peoplenet_pruned.etlt_b1_gpu0_fp16.engine
input-dims=3;544;960;0
uff-input-blob-name=input_1
batch-size=1
process-mode=1
model-color-format=0
# 0=FP32, 1=INT8, 2=FP16 mode
network-mode=2
num-detected-classes=3
cluster-mode=1
interval=1
gie-unique-id=1
output-blob-names=output_bbox/BiasAdd;output_cov/Sigmoid
[class-attrs-all]
pre-cluster-threshold=0.4
# Set eps=0.7 and minBoxes for cluster-mode=1 (DBSCAN)
eps=0.7
minBoxes=1
- My USB-camera Python program uses the same model as above (TLT PeopleNet ResNet-34). In order to get a 1080p USB camera video stream, I changed the pipeline of the deepstream-test1-usbcam Python program to capture an MJPG stream.
The main pipeline code is listed below:
def main(args):
    """Build and run a DeepStream pipeline for a 1080p MJPG USB camera.

    Pipeline:
        v4l2src -> capsfilter(MJPG 1920x1080@30) -> jpegparse -> jpegdec ->
        videoconvert -> nvvideoconvert -> capsfilter(NVMM/NV12) ->
        nvstreammux -> nvinfer (PeopleNet) -> nvtracker -> nvvideoconvert ->
        nvdsosd -> (nvegltransform on Jetson ->) nveglglessink

    Args:
        args: argv-style list; args[1] is the v4l2 device path
            (e.g. "/dev/video0").

    Exits with status 1 on bad arguments or element-creation failure.
    """
    # Check input arguments
    if len(args) != 2:
        # FIX: the pasted code used curly quotes here (a SyntaxError);
        # the forum also ate the <...> placeholder in the usage text.
        sys.stderr.write("usage: %s <v4l2 device path>\n" % args[0])
        sys.exit(1)

    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)

    # Standard GStreamer initialization.
    # NOTE: GObject.threads_init() is a no-op on modern PyGObject; kept
    # for compatibility with older installs.
    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
        sys.exit(1)

    def _make(factory, name):
        # Create a GStreamer element or abort with a clear error —
        # Gst.ElementFactory.make returns None on failure, and the
        # pasted code never checked for that.
        elem = Gst.ElementFactory.make(factory, name)
        if not elem:
            sys.stderr.write(" Unable to create %s \n" % name)
            sys.exit(1)
        return elem

    source = _make("v4l2src", "usb-cam-source")
    caps_v4l2src = _make("capsfilter", "v4l2src_caps")
    jpegparser = _make("jpegparse", "jpeg-parser")
    jpegdecoder = _make("jpegdec", "jpeg-decoder")
    vidconvsrc = _make("videoconvert", "convertor_src1")
    nvvidconvsrc = _make("nvvideoconvert", "convertor_src2")
    caps_vidconvsrc = _make("capsfilter", "nvmm_caps")
    streammux = _make("nvstreammux", "Stream-muxer")
    pgie = _make("nvinfer", "primary-inference")
    tracker = _make("nvtracker", "tracker")
    nvvidconv = _make("nvvideoconvert", "convertor")
    nvosd = _make("nvdsosd", "onscreendisplay")
    if is_aarch64():
        transform = _make("nvegltransform", "nvegl-transform")
    # FIX: the pasted code created the sink only on aarch64, but it is
    # added and linked unconditionally below -> NameError on x86.
    sink = _make("nveglglessink", "nvvideo-renderer")

    source.set_property('device', args[1])
    # Request MJPG from the camera so 1080p30 fits in USB 2.0 bandwidth.
    caps_v4l2src.set_property(
        'caps',
        Gst.Caps.from_string(
            "image/jpeg, width = 1920, height = 1080, framerate = 30/1"))
    # nvstreammux requires NVMM (device) memory on its sink pads.
    caps_vidconvsrc.set_property(
        'caps', Gst.Caps.from_string("video/x-raw(memory:NVMM), format=NV12"))
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "test_pgie_resnet34.txt")

    # Propagate tracker settings from the config file to the element.
    config = configparser.ConfigParser()
    config.read('tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker.set_property('tracker-width',
                                 config.getint('tracker', key))
        if key == 'tracker-height':
            tracker.set_property('tracker-height',
                                 config.getint('tracker', key))
        if key == 'gpu-id':
            # Use the hyphenated property name consistently (PyGObject
            # canonicalises '_' to '-', but the documented name is 'gpu-id').
            tracker.set_property('gpu-id', config.getint('tracker', key))
        if key == 'll-lib-file':
            tracker.set_property('ll-lib-file', config.get('tracker', key))
        if key == 'll-config-file':
            tracker.set_property('ll-config-file',
                                 config.get('tracker', key))
        if key == 'enable-batch-process':
            tracker.set_property('enable-batch-process',
                                 config.getint('tracker', key))

    # Set sync = false to avoid late frame drops at the display-sink
    sink.set_property('sync', False)

    print("Adding elements to Pipeline \n")
    for elem in (source, caps_v4l2src, jpegparser, jpegdecoder, vidconvsrc,
                 nvvidconvsrc, caps_vidconvsrc, streammux, pgie, tracker,
                 nvvidconv, nvosd, sink):
        pipeline.add(elem)
    if is_aarch64():
        pipeline.add(transform)

    # we link the elements together
    # v4l2src -> nvvideoconvert -> mux ->
    # nvinfer -> nvvideoconvert -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    source.link(caps_v4l2src)
    caps_v4l2src.link(jpegparser)
    jpegparser.link(jpegdecoder)
    jpegdecoder.link(vidconvsrc)
    vidconvsrc.link(nvvidconvsrc)
    nvvidconvsrc.link(caps_vidconvsrc)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = caps_vidconvsrc.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Lets add probe to get informed of the meta data generated, we add probe to
    # the sink pad of the osd element, since by that time, the buffer would have
    # had got all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER,
                         osd_sink_pad_buffer_probe, 0)

    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        # FIX: bare 'except: pass' hid real errors; only Ctrl-C is an
        # expected way to leave the main loop.
        pass
    finally:
        # cleanup — runs even if loop.run() raised
        pipeline.set_state(Gst.State.NULL)
- Please kindly help me. If it is too late to reply in this thread, I can open a new topic. Many thanks!