Segmentation fault in "nvv4l2decoder" with multiple cameras

• Hardware Platform (Jetson / GPU)
Jetson AGX Xavier
• DeepStream Version
5.1
• JetPack Version (valid for Jetson only)
4.5.1
• TensorRT Version
7.1.3.0
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type( questions, new requirements, bugs)
Segmentation fault with "nvv4l2decoder" when decoding multiple camera streams
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)
As an NVIDIA employee mentioned, the nvjpegdec plugin is not supported in the DeepStream SDK, so I am using nvv4l2decoder (mjpeg=1) to decode the MJPEG camera streams.

Working pipelines:
Single camera with HW-accelerated decoding:

v4l2src -> capsfilter(image/jpeg) -> jpegparse -> nvv4l2decoder(mjpeg=1) -> nvvideoconvert -> capsfilter(caps=video/x-raw(memory:NVMM)) -> nvstreammux -> nvinfer -> nvmultistreamtiler -> nvdsosd -> nvegltransform -> nveglglessink
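
For reference, a minimal Gst.parse_launch sketch of this single-camera pipeline (a condensed approximation of the full Python script further below; /dev/video0 and config.txt are placeholders for my setup):

#!/usr/bin/env python3
# Minimal sketch of the working single-camera HW-accelerated pipeline.
# The device node and nvinfer config path are placeholders.
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)
pipeline = Gst.parse_launch(
    "v4l2src device=/dev/video0 io-mode=2 ! "
    "image/jpeg,width=1920,height=1080,framerate=30/1 ! "
    "jpegparse ! nvv4l2decoder mjpeg=1 ! nvvideoconvert ! "
    "video/x-raw(memory:NVMM) ! mux.sink_0 "
    "nvstreammux name=mux batch-size=1 width=1920 height=1080 live-source=1 ! "
    "nvinfer config-file-path=config.txt ! "
    "nvmultistreamtiler rows=1 columns=1 width=1920 height=1080 ! "
    "nvdsosd ! nvegltransform ! nveglglessink sync=false")
pipeline.set_state(Gst.State.PLAYING)
try:
    GLib.MainLoop().run()
except KeyboardInterrupt:
    pass
pipeline.set_state(Gst.State.NULL)
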
Multiple cameras with software decoding:
v4l2src(source=0) -> capsfilter(image/jpeg) -> jpegdec -> nvvideoconvert -> capsfilter(caps=video/x-raw(memory:NVMM)) -> nvstreammux
v4l2src(source=1) -> capsfilter(image/jpeg) -> jpegdec -> nvvideoconvert -> capsfilter(caps=video/x-raw(memory:NVMM)) -> nvstreammux
nvstreammux -> nvinfer -> nvmultistreamtiler -> nvdsosd -> nvegltransform -> nveglglessink
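
The software-decoding multi-camera case differs only in the per-source branch: jpegdec replaces jpegparse + nvv4l2decoder. A sketch of that branch, again with placeholder device nodes (two instances of it feed mux.sink_0 and mux.sink_1 of an nvstreammux with batch-size=2):

# Per-source branch for the (working) software-decoding multi-camera pipeline;
# format with (device_index, mux_pad_index), e.g. SW_SRC % (0, 0) and SW_SRC % (1, 1).
SW_SRC = ("v4l2src device=/dev/video%d io-mode=2 ! "
          "image/jpeg,width=1920,height=1080,framerate=30/1 ! "
          "jpegdec ! nvvideoconvert ! video/x-raw(memory:NVMM) ! mux.sink_%d ")
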

Segmentation fault pipeline:
Multiple cameras with HW-accelerated decoding:

v4l2src(source=0) -> capsfilter(image/jpeg) -> jpegparse -> nvv4l2decoder(mjpeg=1) -> nvvideoconvert -> capsfilter(caps=video/x-raw(memory:NVMM)) -> nvstreammux
v4l2src(source=1) -> capsfilter(image/jpeg) -> jpegparse -> nvv4l2decoder(mjpeg=1) -> nvvideoconvert -> capsfilter(caps=video/x-raw(memory:NVMM)) -> nvstreammux
nvstreammux -> nvinfer -> nvmultistreamtiler -> nvdsosd -> nvegltransform -> nveglglessink
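
A condensed Gst.parse_launch approximation of this failing topology (the full Python script below is the actual repro; device nodes and config.txt are placeholders):

# Sketch of the segfaulting topology: two HW-decoded MJPEG sources into one nvstreammux.
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)
HW_SRC = ("v4l2src device=/dev/video%d io-mode=2 ! "
          "image/jpeg,width=1920,height=1080,framerate=30/1 ! "
          "jpegparse ! nvv4l2decoder mjpeg=1 ! nvvideoconvert ! "
          "video/x-raw(memory:NVMM) ! mux.sink_%d ")
pipeline = Gst.parse_launch(
    HW_SRC % (0, 0) + HW_SRC % (1, 1) +
    "nvstreammux name=mux batch-size=2 width=1920 height=1080 live-source=1 ! "
    "nvinfer config-file-path=config.txt batch-size=2 ! "
    "nvmultistreamtiler rows=1 columns=2 width=1920 height=1080 ! "
    "nvdsosd ! nvegltransform ! nveglglessink sync=false")
pipeline.set_state(Gst.State.PLAYING)
try:
    GLib.MainLoop().run()
except KeyboardInterrupt:
    pass
pipeline.set_state(Gst.State.NULL)
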

• Requirement details( This is for new requirement. Including the module name-for which plugin or for which sample application, the function description)

#!/usr/bin/env python3
import time
import math
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst

WIDTH = 1920
HEIGHT = 1080

HW_ACCEL = True
# HW_ACCEL = False
	
def bus_call(bus, message, loop):
	t = message.type
	if t == Gst.MessageType.EOS:
		sys.stdout.write("End-of-stream\n")
		loop.quit()
	elif t==Gst.MessageType.WARNING:
		err, debug = message.parse_warning()
		sys.stderr.write("Warning: %s: %s\n" % (err, debug))
	elif t == Gst.MessageType.ERROR:
		err, debug = message.parse_error()
		sys.stderr.write("Error: %s: %s\n" % (err, debug))
		loop.quit()
	return True

def create_source_bin(index):
	camera = '/dev/video%d' % index
	print(camera)

	nbin=Gst.Bin.new("source-bin-%d" % index)
	if not nbin:
		sys.stderr.write(" Unable to create source bin")

	source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
	if not source:
		sys.stderr.write(" Unable to create source")
	source.set_property('device', camera)
	source.set_property('io-mode', 2)
	nbin.add(source)

	caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
	if not caps_v4l2src:
		sys.stderr.write("Could not create caps_v4l2src")
	caps_v4l2src.set_property('caps', Gst.Caps.from_string("image/jpeg, width=%d, height=%d, framerate=30/1" % (WIDTH, HEIGHT)))
	nbin.add(caps_v4l2src)
	
	if HW_ACCEL:
		jpegparse = Gst.ElementFactory.make("jpegparse", "jpeg-parser")
		if not jpegparse:
			sys.stderr.write(" Unable to create jpeg parser")
		nbin.add(jpegparse)
			
		nvv4l2decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2decoder")
		if not nvv4l2decoder:
			sys.stderr.write(" Unable to create nvv4l2decoder")
		nvv4l2decoder.set_property('mjpeg', 1)
		nbin.add(nvv4l2decoder)
	else:
		jpegdec = Gst.ElementFactory.make("jpegdec", "jpegdec")
		if not jpegdec:
			sys.stderr.write(" Unable to create jpegdec")
		nbin.add(jpegdec)

	nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "nvvideoconvert")
	if not nvvidconvsrc:
		sys.stderr.write(" Unable to create nvvideoconvert")
	nbin.add(nvvidconvsrc)

	caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "capsfilter")
	if not caps_vidconvsrc:
		sys.stderr.write(" Unable to create capsfilter")
	caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
	nbin.add(caps_vidconvsrc)

	source.link(caps_v4l2src)
	caps_v4l2src.link(videorate)
	if HW_ACCEL:
		videorate.link(jpegparse)
		jpegparse.link(nvv4l2decoder)
		nvv4l2decoder.link(nvvidconvsrc)
	else:
		videorate.link(jpegdec)
		jpegdec.link(nvvidconvsrc)
	nvvidconvsrc.link(videoscale)
	videoscale.link(caps_vidconvsrc)

	srcpad = caps_vidconvsrc.get_static_pad("src")
	bin_pad=nbin.add_pad(Gst.GhostPad.new("src",srcpad))
	if not bin_pad:
		sys.stderr.write(" Failed to add ghost pad in source bin")
		return None
	return nbin
	
def run(number_sources):
	GObject.threads_init()
	Gst.init(None)
	pipeline = Gst.Pipeline()
	if not pipeline:
		sys.stderr.write(" Unable to create Pipeline")

	streammux = Gst.ElementFactory.make("nvstreammux", "streammux")
	if not streammux:
		sys.stderr.write(" Unable to create NvStreamMux")
	streammux.set_property('width', WIDTH)
	streammux.set_property('height', HEIGHT)
	streammux.set_property('batch-size', number_sources)
	streammux.set_property('batched-push-timeout', 1000 * 1000) # 1000ms
	streammux.set_property('live-source', 1)
	pipeline.add(streammux)

	pgie = Gst.ElementFactory.make("nvinfer", "pgie")
	if not pgie:
		sys.stderr.write(" Unable to create pgie")
	pgie.set_property('config-file-path', "config.txt")
	pgie.set_property("batch-size", number_sources)
	pipeline.add(pgie)
	
	caps_filter = Gst.ElementFactory.make("capsfilter", "caps_filter")
	if not caps_filter:
		sys.stderr.write(" Unable to create capsfilter")
	caps_filter.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA"))
	pipeline.add(caps_filter)
	
	tiler = Gst.ElementFactory.make("nvmultistreamtiler", "tiler")
	if not tiler:
		sys.stderr.write(" Unable to create tiler")
	tiler_rows = int(math.sqrt(number_sources))
	tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
	tiler.set_property("rows", tiler_rows)
	tiler.set_property("columns", tiler_columns)
	tiler.set_property("width", WIDTH)
	tiler.set_property("height", HEIGHT)
	pipeline.add(tiler)

	nvosd = Gst.ElementFactory.make("nvdsosd", "nvosd")
	if not nvosd:
		sys.stderr.write(" Unable to create nvosd")
	pipeline.add(nvosd)

	transform = Gst.ElementFactory.make("nvegltransform", "transform")
	if not transform:
		sys.stderr.write(" Unable to create transform")
	pipeline.add(transform)
	
	sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
	if not sink:
		sys.stderr.write(" Unable to create egl sink")
	sink.set_property('sync', False)
	pipeline.add(sink)

	for index in range(number_sources):
		source_bin=create_source_bin(index)
		if not source_bin:
			sys.stderr.write("Unable to create source bin")
		pipeline.add(source_bin)
		sinkpad= streammux.get_request_pad("sink_%d" % index) 
		if not sinkpad:
			sys.stderr.write("Unable to create sink pad bin")
		srcpad=source_bin.get_static_pad("src")
		if not srcpad:
			sys.stderr.write("Unable to create src pad bin")
		srcpad.link(sinkpad)
	
	streammux.link(pgie)
	pgie.link(caps_filter)
	caps_filter.link(tiler)
	tiler.link(nvosd)
	nvosd.link(transform)
	transform.link(sink)

	sinkpad = tiler.get_static_pad("sink")
	if not sinkpad:
		sys.stderr.write(" Unable to get sink pad")

	loop = GObject.MainLoop()
	bus = pipeline.get_bus()
	bus.add_signal_watch()
	bus.connect("message", bus_call, loop)

	pipeline.set_state(Gst.State.PLAYING)
	try:
		loop.run()
	except:
		pass
	pipeline.set_state(Gst.State.NULL)

if __name__ == '__main__':
	number_sources = 2
	run(number_sources)

Hi,
I ran into a "videorate and videoscale not defined" issue. I added a videorate element, but then hit a "videoscale not defined" issue. Please share a workable Python sample for the repro.

NameError: global name 'videoscale' is not defined

Thanks.

@Amycao Just remove them from the pipeline.
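
For clarity, the only change from the first script is the linking section in create_source_bin: the undefined videorate / videoscale references are gone and the elements are linked directly:

	source.link(caps_v4l2src)
	if HW_ACCEL:
		caps_v4l2src.link(jpegparse)
		jpegparse.link(nvv4l2decoder)
		nvv4l2decoder.link(nvvidconvsrc)
	else:
		caps_v4l2src.link(jpegdec)
		jpegdec.link(nvvidconvsrc)
	nvvidconvsrc.link(caps_vidconvsrc)

The full corrected script: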

#!/usr/bin/env python3
import time
import math
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst

WIDTH = 1920
HEIGHT = 1080

HW_ACCEL = True
# HW_ACCEL = False
	
def bus_call(bus, message, loop):
	t = message.type
	if t == Gst.MessageType.EOS:
		sys.stdout.write("End-of-stream\n")
		loop.quit()
	elif t==Gst.MessageType.WARNING:
		err, debug = message.parse_warning()
		sys.stderr.write("Warning: %s: %s\n" % (err, debug))
	elif t == Gst.MessageType.ERROR:
		err, debug = message.parse_error()
		sys.stderr.write("Error: %s: %s\n" % (err, debug))
		loop.quit()
	return True

def create_source_bin(index):
	camera = '/dev/video%d' % index
	print(camera)

	nbin=Gst.Bin.new("source-bin-%d" % index)
	if not nbin:
		sys.stderr.write(" Unable to create source bin")

	source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
	if not source:
		sys.stderr.write(" Unable to create source")
	source.set_property('device', camera)
	source.set_property('io-mode', 2)
	nbin.add(source)

	caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
	if not caps_v4l2src:
		sys.stderr.write("Could not create caps_v4l2src")
	caps_v4l2src.set_property('caps', Gst.Caps.from_string("image/jpeg, width=%d, height=%d, framerate=30/1" % (WIDTH, HEIGHT)))
	nbin.add(caps_v4l2src)
	
	if HW_ACCEL:
		jpegparse = Gst.ElementFactory.make("jpegparse", "jpeg-parser")
		if not jpegparse:
			sys.stderr.write(" Unable to create jpeg parser")
		nbin.add(jpegparse)
			
		nvv4l2decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2decoder")
		if not nvv4l2decoder:
			sys.stderr.write(" Unable to create nvv4l2decoder")
		nvv4l2decoder.set_property('mjpeg', 1)
		nbin.add(nvv4l2decoder)
	else:
		jpegdec = Gst.ElementFactory.make("jpegdec", "jpegdec")
		if not jpegdec:
			sys.stderr.write(" Unable to create jpegdec")
		nbin.add(jpegdec)

	nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "nvvideoconvert")
	if not nvvidconvsrc:
		sys.stderr.write(" Unable to create nvvideoconvert")
	nbin.add(nvvidconvsrc)

	caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "capsfilter")
	if not caps_vidconvsrc:
		sys.stderr.write(" Unable to create capsfilter")
	caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
	nbin.add(caps_vidconvsrc)

	source.link(caps_v4l2src)
	if HW_ACCEL:
		caps_v4l2src.link(jpegparse)
		jpegparse.link(nvv4l2decoder)
		nvv4l2decoder.link(nvvidconvsrc)
	else:
		caps_v4l2src.link(jpegdec)
		jpegdec.link(nvvidconvsrc)
	nvvidconvsrc.link(caps_vidconvsrc)

	srcpad = caps_vidconvsrc.get_static_pad("src")
	bin_pad=nbin.add_pad(Gst.GhostPad.new("src",srcpad))
	if not bin_pad:
		sys.stderr.write(" Failed to add ghost pad in source bin")
		return None
	return nbin
	
def run(number_sources):
	GObject.threads_init()
	Gst.init(None)
	pipeline = Gst.Pipeline()
	if not pipeline:
		sys.stderr.write(" Unable to create Pipeline")

	streammux = Gst.ElementFactory.make("nvstreammux", "streammux")
	if not streammux:
		sys.stderr.write(" Unable to create NvStreamMux")
	streammux.set_property('width', WIDTH)
	streammux.set_property('height', HEIGHT)
	streammux.set_property('batch-size', number_sources)
	streammux.set_property('batched-push-timeout', 1000 * 1000) # 1000ms
	streammux.set_property('live-source', 1)
	pipeline.add(streammux)

	pgie = Gst.ElementFactory.make("nvinfer", "pgie")
	if not pgie:
		sys.stderr.write(" Unable to create pgie")
	pgie.set_property('config-file-path', "config.txt")
	pgie.set_property("batch-size", number_sources)
	pipeline.add(pgie)
	
	caps_filter = Gst.ElementFactory.make("capsfilter", "caps_filter")
	if not caps_filter:
		sys.stderr.write(" Unable to create capsfilter")
	caps_filter.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA"))
	pipeline.add(caps_filter)
	
	tiler = Gst.ElementFactory.make("nvmultistreamtiler", "tiler")
	if not tiler:
		sys.stderr.write(" Unable to create tiler")
	tiler_rows = int(math.sqrt(number_sources))
	tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
	tiler.set_property("rows", tiler_rows)
	tiler.set_property("columns", tiler_columns)
	tiler.set_property("width", WIDTH)
	tiler.set_property("height", HEIGHT)
	pipeline.add(tiler)

	nvosd = Gst.ElementFactory.make("nvdsosd", "nvosd")
	if not nvosd:
		sys.stderr.write(" Unable to create nvosd")
	pipeline.add(nvosd)

	transform = Gst.ElementFactory.make("nvegltransform", "transform")
	if not transform:
		sys.stderr.write(" Unable to create transform")
	pipeline.add(transform)
	
	sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
	if not sink:
		sys.stderr.write(" Unable to create egl sink")
	sink.set_property('sync', False)
	pipeline.add(sink)

	for index in range(number_sources):
		source_bin=create_source_bin(index)
		if not source_bin:
			sys.stderr.write("Unable to create source bin")
		pipeline.add(source_bin)
		sinkpad= streammux.get_request_pad("sink_%d" % index) 
		if not sinkpad:
			sys.stderr.write("Unable to create sink pad bin")
		srcpad=source_bin.get_static_pad("src")
		if not srcpad:
			sys.stderr.write("Unable to create src pad bin")
		srcpad.link(sinkpad)
	
	streammux.link(pgie)
	pgie.link(caps_filter)
	caps_filter.link(tiler)
	tiler.link(nvosd)
	nvosd.link(transform)
	transform.link(sink)

	sinkpad = tiler.get_static_pad("sink")
	if not sinkpad:
		sys.stderr.write(" Unable to get sink pad")

	loop = GObject.MainLoop()
	bus = pipeline.get_bus()
	bus.add_signal_watch()
	bus.connect("message", bus_call, loop)

	pipeline.set_state(Gst.State.PLAYING)
	try:
		loop.run()
	except:
		pass
	pipeline.set_state(Gst.State.NULL)

if __name__ == '__main__':
	number_sources = 2
	run(number_sources)

Please try the patch from this topic:
Libv4l2_nvargus.so source code - Jetson & Embedded Systems / Jetson Nano - NVIDIA Developer Forums


That's working! Please integrate it into the next software version.