Segmentation fault (core dumped) when tracker is enabled on DS 5.1

I get this error when I enable the tracker in the deepstream-imagedata-multistream example.

pipeline:

    streammux.link(face_detector)
    face_detector.link(tracker)
    tracker.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(face_recogniser)
    face_recogniser.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

It works fine on DeepStream 5.0.

• Hardware Platform: NX
• DeepStream 5.1
• JetPack Version: 4.5

There is no useful information in your description.

I got this error when I run the face recognition and detection models. It works fine if I disable the tracker.

main function:

    def main(args):
	# Check input arguments
	# if len(args) < 2:
	# 	sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN] <folder to save frames>\n" % args[0])
	# 	sys.exit(1)

	for i in range(0, len(args) - 1):
		fps_streams["stream{0}".format(i)] = GETFPS(i)
	number_sources = len(args) - 1

	print("number_sources ",number_sources)
	# global folder_name
	# folder_name = args[-1]
	# if path.exists(folder_name):
	# 	sys.stderr.write("The output folder %s already exists. Please remove it first.\n" % folder_name)
	# 	sys.exit(1)

	# os.mkdir(folder_name)
	# print("Frames will be saved in ", folder_name)


	# Standard GStreamer initialization
	GObject.threads_init()
	Gst.init(None)

	# Create GStreamer elements.
	# Create Pipeline element that will form a connection of other elements.
	print("Creating Pipeline \n ")
	pipeline = Gst.Pipeline()
	is_live = False

	if not pipeline:
		sys.stderr.write(" Unable to create Pipeline \n")
	print("Creating streamux \n ")

	# Create nvstreammux instance to form batches from one or more sources.
	streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
	if not streammux:
		sys.stderr.write(" Unable to create NvStreamMux \n")

	pipeline.add(streammux)
	for i in range(number_sources):
		# os.mkdir(folder_name + "/stream_" + str(i))
		# frame_count["stream_" + str(i)] = 0
		# saved_count["stream_" + str(i)] = 0
		print("Creating source_bin ", i, " \n ")
		uri_name = args[i + 1]
		if uri_name.find("rtsp://") == 0:
			is_live = True
		source_bin = create_source_bin(i, uri_name)
		if not source_bin:
			sys.stderr.write("Unable to create source bin \n")
		pipeline.add(source_bin)
		padname = "sink_%u" % i
		sinkpad = streammux.get_request_pad(padname)
		if not sinkpad:
			sys.stderr.write("Unable to create sink pad bin \n")
		srcpad = source_bin.get_static_pad("src")
		if not srcpad:
			sys.stderr.write("Unable to create src pad bin \n")
		srcpad.link(sinkpad)
	print("Creating Pgie \n ")
	face_detector = Gst.ElementFactory.make("nvinfer", "face-detector-inference")
	if not face_detector:
		sys.stderr.write(" Unable to create face_detector \n")
	
	tracker = Gst.ElementFactory.make("nvtracker", "tracker")
	if not tracker:
		sys.stderr.write(" Unable to create tracker \n")

	face_recogniser = Gst.ElementFactory.make("nvinfer", "face-recogniser-inference")
	if not face_recogniser:
		sys.stderr.write(" Unable to create face_recogniser  \n")

	# Add nvvidconv1 and filter1 to convert the frames to RGBA
	# which is easier to work with in Python.
	print("Creating nvvidconv1 \n ")
	nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
	if not nvvidconv1:
		sys.stderr.write(" Unable to create nvvidconv1 \n")
	print("Creating filter1 \n ")
	caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
	filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
	if not filter1:
		sys.stderr.write(" Unable to get the caps filter1 \n")
	filter1.set_property("caps", caps1)
	print("Creating tiler \n ")
	tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
	if not tiler:
		sys.stderr.write(" Unable to create tiler \n")
	print("Creating nvvidconv \n ")
	nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
	if not nvvidconv:
		sys.stderr.write(" Unable to create nvvidconv \n")
	print("Creating nvosd \n ")
	nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
	if not nvosd:
		sys.stderr.write(" Unable to create nvosd \n")
		
	queue1=Gst.ElementFactory.make("queue", "nvtee-que1")
	if not queue1:
		sys.stderr.write(" Unable to create queue1 \n")
	if (is_aarch64()):
		print("Creating transform \n ")
		# transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
		transform = Gst.ElementFactory.make("queue", "queue")
		if not transform:
			sys.stderr.write(" Unable to create transform \n")

	print("Creating EGLSink \n")
	# sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
	sink = Gst.ElementFactory.make("nvoverlaysink", "nvvideo-renderer")
	if not sink:
		sys.stderr.write(" Unable to create egl sink \n")

	if is_live:
		print("Atleast one of the sources is live")
		streammux.set_property('live-source', 1)

	streammux.set_property('width', 1920)
	streammux.set_property('height', 1080)
	streammux.set_property('batch-size', number_sources)
	streammux.set_property('batched-push-timeout', 400000)
	face_recogniser.set_property('config-file-path', "face_recogniser_config.txt")
	face_detector.set_property('config-file-path', "face_detector_config.txt")
	pgie_batch_size = face_detector.get_property("batch-size")
	if (pgie_batch_size != number_sources):
		print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ",
			  number_sources, " \n")
		face_detector.set_property("batch-size", number_sources)
	tiler_rows = int(math.sqrt(number_sources))
	tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
	tiler.set_property("rows", tiler_rows)
	tiler.set_property("columns", tiler_columns)
	tiler.set_property("width", TILED_OUTPUT_WIDTH)
	tiler.set_property("height", TILED_OUTPUT_HEIGHT)

	sink.set_property("sync", 0)

	if not is_aarch64():
		# Use CUDA unified memory in the pipeline so frames
		# can be easily accessed on CPU in Python.
		mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
		streammux.set_property("nvbuf-memory-type", mem_type)
		nvvidconv.set_property("nvbuf-memory-type", mem_type)
		nvvidconv1.set_property("nvbuf-memory-type", mem_type)
		tiler.set_property("nvbuf-memory-type", mem_type)
	
	#Set properties of tracker
	config = configparser.ConfigParser()
	config.read('dstest2_tracker_config.txt')
	config.sections()

	for key in config['tracker']:
		if key == 'tracker-width' :
			tracker_width = config.getint('tracker', key)
			tracker.set_property('tracker-width', tracker_width)
		if key == 'tracker-height' :
			tracker_height = config.getint('tracker', key)
			tracker.set_property('tracker-height', tracker_height)
		if key == 'gpu-id' :
			tracker_gpu_id = config.getint('tracker', key)
			tracker.set_property('gpu_id', tracker_gpu_id)
		if key == 'll-lib-file' :
			tracker_ll_lib_file = config.get('tracker', key)
			tracker.set_property('ll-lib-file', tracker_ll_lib_file)
		if key == 'll-config-file' :
			tracker_ll_config_file = config.get('tracker', key)
			tracker.set_property('ll-config-file', tracker_ll_config_file)
		if key == 'enable-batch-process' :
			tracker_enable_batch_process = config.getint('tracker', key)
			tracker.set_property('enable_batch_process', tracker_enable_batch_process)
		if key == 'enable-past-frame' :
			tracker_enable_past_frame = config.getint('tracker', key)
			tracker.set_property('enable_past_frame', tracker_enable_past_frame)

	print("Adding elements to Pipeline \n")
	pipeline.add(face_detector)
	pipeline.add(tracker)
	pipeline.add(face_recogniser)
	pipeline.add(tiler)
	pipeline.add(nvvidconv)
	pipeline.add(filter1)
	pipeline.add(nvvidconv1)
	pipeline.add(nvosd)
	pipeline.add(queue1)
	if is_aarch64():
		pipeline.add(transform)
	pipeline.add(sink)

	print("Linking elements in the Pipeline \n")
	streammux.link(face_detector)
	face_detector.link(tracker)
	tracker.link(nvvidconv1)
	nvvidconv1.link(filter1)
	filter1.link(tiler)
	tiler.link(face_recogniser)
	# queue1.link(face_recogniser)
	face_recogniser.link(nvvidconv)
	nvvidconv.link(nvosd)
	if is_aarch64():
		nvosd.link(transform)
		transform.link(sink)
	else:
		nvosd.link(sink)

	# create an event loop and feed GStreamer bus messages to it
	loop = GObject.MainLoop()
	bus = pipeline.get_bus()
	bus.add_signal_watch()
	bus.connect("message", bus_call, loop)

	tiler_sink_pad = tiler.get_static_pad("sink")
	if not tiler_sink_pad:
		sys.stderr.write(" Unable to get sink pad of tiler \n")
	else:
		tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)

	vidconvsinkpad = nvvidconv.get_static_pad("sink")
	if not vidconvsinkpad:
		sys.stderr.write(" Unable to get sink pad of nvvidconv \n")
	else:
		vidconvsinkpad.add_probe(Gst.PadProbeType.BUFFER, sgie_sink_pad_buffer_probe, 0)

	# List the sources
	print("Now playing...")
	for i, source in enumerate(args[1:], start=1):
		print(i, ": ", source)

	print("Starting pipeline \n")
	# start playback and listen to events
	pipeline.set_state(Gst.State.PLAYING)
	try:
		loop.run()
	except:
		pass
	# cleanup
	print("Exiting app\n")
	pipeline.set_state(Gst.State.NULL)


if __name__ == '__main__':
	sys.exit(main(sys.argv))

face_detector config:

```
[property]
gpu-id=0
process-mode=1

net-scale-factor=0.0039215697906911373
#onnx-file=/home/jetson-nx/codes/models/MobileSSD_face_detection_model/MobileSSD_face_detection.onnx
model-engine-file=/opt/nvidia/deepstream/deepstream-5.1/samples/models/Secondary_FaceDetect/fd_lpd.caffemodel_b1_gpu0_fp32.engine
labelfile-path=/opt/nvidia/deepstream/deepstream-5.1/samples/models/Secondary_FaceDetect/labels.txt

model-file=/opt/nvidia/deepstream/deepstream-5.1/samples/models/Secondary_FaceDetect/fd_lpd.caffemodel
proto-file=/opt/nvidia/deepstream/deepstream-5.1/samples/models/Secondary_FaceDetect/fd_lpd.prototxt

#force-implicit-batch-dim=1
batch-size=1
network-mode=0

num-detected-classes=3
interval=2

gie-unique-id=2
#operate-on-gie-id=1
#operate-on-class-ids=2

output-blob-names=output_bbox;output_cov

input-object-min-width=64
input-object-min-height=64

maintain-aspect-ratio=1


[class-attrs-all]
pre-cluster-threshold=0.7
#Post-cluster-threshold =0.7
#threshold= 0.7
eps=0.2
group-threshold=1
```

face_recogniser config:
```
[property]
gpu-id=0
process-mode=2

#net-scale-factor=0.00329215686274
net-scale-factor=0.0189601459307
offsets=112.86182266638355;112.86182266638355;112.86182266638355

#onnx-file=/home/jetson-nx/codes/models/facenet/agx_facenet_dynamic_model.onnx
model-engine-file=/home/jetson-nx/codes/models/facenet/agx_facenet_dynamic_model.onnx_b16_gpu0_fp16.engine
#force-implicit-batch-dim=1
batch-size=16
# 0=FP32, 1=INT8, 2=FP16 mode
network-mode=2

gie-unique-id=3
operate-on-gie-id=2
operate-on-class-ids=0

is-classifier=1
classifier-async-mode=0

#infer-dims=3;160;160
#input-object-min-width=30
#input-object-min-height=30
model-color-format=1

output-tensor-meta=1

You cannot attach an SGIE after nvmultistreamtiler.

For a PGIE+SGIE sample, please refer to deepstream-test2: deepstream_python_apps/apps/deepstream-test2 at master · NVIDIA-AI-IOT/deepstream_python_apps · GitHub
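For reference, in deepstream-test2 the SGIEs are linked directly after the tracker, and conversion for display happens only after inference. Below is a minimal sketch of that ordering, adapted to the element names used in this thread; adding the tiler and RGBA conversion from this app is my assumption, not code from the sample:

    # Sketch only: deepstream-test2 links PGIE -> tracker -> SGIE back to back.
    # RGBA conversion and tiling come after all inference is finished.
    streammux.link(face_detector)        # PGIE: face detection
    face_detector.link(tracker)          # nvtracker: assigns object IDs
    tracker.link(face_recogniser)        # SGIE: runs on tracked objects
    face_recogniser.link(nvvidconv1)     # convert to RGBA for Python access
    nvvidconv1.link(filter1)
    filter1.link(tiler)                  # tile the batched streams for display
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)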

I made some changes to the linking, but I still get the same problem. It only appears when I enable the tracker:

    streammux.link(face_detector)
    face_detector.link(tracker)
    tracker.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(face_recogniser)
    face_recogniser.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)


    sgie_src_pad = face_recogniser.get_static_pad("src")
    if not sgie_src_pad:
        sys.stderr.write(" Unable to get src pad of face_recogniser \n")
    else:
        sgie_src_pad.add_probe(Gst.PadProbeType.BUFFER, sgie_sink_pad_buffer_probe, 0)

BTW, it works fine on DeepStream 5.0.1.

Why do you add nvvideoconvert between the tracker and the SGIE?

Since the same app works on DeepStream 5.0.1, it may be a model problem. DS 5.1 needs JetPack 4.5.1; have you installed the packages correctly?

1 - I added nvvideoconvert to convert the frames to RGBA format.

2 - If it is a model problem, why does it work without the tracker?

3 - I tried another Jetson device (Nano) with JetPack 4.5.1 and the problem is the same.

Can deepstream-test2 work on your platform? If so, there is no problem with the tracker, and you can compare your settings and configuration against deepstream-test2.

Hello,
the app works when I change network-type to 100.

What does 100 mean, and when should I use it?

network-type=100 means "other" network types; gst-nvinfer will do nothing with the model output.
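In practice this pairs with output-tensor-meta=1 (already set in the face_recogniser config above): gst-nvinfer skips its built-in detector/classifier post-processing and attaches the raw output tensors as user meta for the app to parse itself. Below is a minimal sketch of reading that meta in a pad probe, assuming a single FP32 output layer; the embedding length of 128 is a placeholder, not a value from this thread:

    import ctypes
    import numpy as np
    import pyds
    # Gst comes from the app's existing `from gi.repository import Gst` import.

    def sgie_sink_pad_buffer_probe(pad, info, u_data):
        gst_buffer = info.get_buffer()
        batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
        l_frame = batch_meta.frame_meta_list
        while l_frame is not None:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
            l_obj = frame_meta.obj_meta_list
            while l_obj is not None:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                l_user = obj_meta.obj_user_meta_list
                while l_user is not None:
                    user_meta = pyds.NvDsUserMeta.cast(l_user.data)
                    if user_meta.base_meta.meta_type == pyds.NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META:
                        tensor_meta = pyds.NvDsInferTensorMeta.cast(user_meta.user_meta_data)
                        layer = pyds.get_nvds_LayerInfo(tensor_meta, 0)  # first output layer
                        ptr = ctypes.cast(pyds.get_ptr(layer.buffer),
                                          ctypes.POINTER(ctypes.c_float))
                        # 128 is a hypothetical embedding length; use the model's real dims.
                        embedding = np.ctypeslib.as_array(ptr, shape=(128,))
                    try:
                        l_user = l_user.next
                    except StopIteration:
                        break
                try:
                    l_obj = l_obj.next
                except StopIteration:
                    break
            try:
                l_frame = l_frame.next
            except StopIteration:
                break
        return Gst.PadProbeReturn.OK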

Thank you