Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU) Jetson
• DeepStream Version 6.0.1
• JetPack Version (valid for Jetson only) 4.6
• TensorRT Version
• NVIDIA GPU Driver Version (valid for GPU only)
Hi, I have developed a DeepStream Python pipeline for multiple CSI cameras. I am facing an issue while running the pipeline with an input size of 3264x2464.
Here is the main function of the pipeline:
def main(args):
    if len(args) < 1:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN] \n" % args[0])
        sys.exit(1)

    args.append('/dev/video0')
    args.append('/dev/video1')

    global perf_data, gsv, pipeline
    perf_data = PERF_DATA(len(args) - 1)
    number_sources = len(args) - 1

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    global channel, connection
    try:
        channel, connection = initializeChannel()
    except (pika.exceptions.StreamLostError,
            pika.exceptions.ConnectionClosed,
            pika.exceptions.ChannelClosed,
            pika.exceptions.AMQPConnectionError,
            pika.exceptions.ConnectionWrongStateError,
            ConnectionResetError) as e:
        logger.info(" Failed to establish connection.")
        try:
            connection.close()
        except:
            pass
        logger.info("CIGAR DETECTION MODULE: Deactivated")
        sys.exit()
    except Exception as e:
        print(e)

    Gst.init(None)

    # Create GStreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    cameras_list = [
        {"source": 0, "name": "Camera 1"},
        {"source": 1, "name": "Camera 2"},
    ]

    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streamux \n ")
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    for camera in cameras_list:
        source = Gst.ElementFactory.make("nvarguscamerasrc", "source-" + camera['name'])
        if not source:
            sys.stderr.write(" Unable to create Source \n")
        source.set_property('sensor-id', camera['source'])
        source.set_property('bufapi-version', True)
        source.set_property('sensor_mode', 0)
        # source.set_property('num-buffers', 15)

        caps = Gst.ElementFactory.make("capsfilter", "source-caps-source-" + camera['name'])
        if not caps:
            sys.stderr.write(" Unable to create capsfilter \n")
        caps.set_property("caps", Gst.Caps.from_string(
            "video/x-raw(memory:NVMM), width=3264, height=2464, framerate=21/1, format=NV12"))

        pipeline.add(source)
        pipeline.add(caps)

        # Note: the capsfilter is added to the pipeline but not linked into the chain;
        # the source src pad is linked directly to the streammux sink pad below.
        sinkpad = streammux.get_request_pad('sink_' + str(camera['source']))
        srcpad = source.get_static_pad("src")
        if not sinkpad:
            print("Unable to create source sink pad")
            exit(0)
        if not srcpad:
            print("Unable to create source src pad")
            exit(0)
        srcpad.link(sinkpad)

    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")

    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    if is_aarch64():
        print("Creating nv3dsink \n")
        sink = Gst.ElementFactory.make("fakesink", "fakesink")
        if not sink:
            sys.stderr.write(" Unable to create nv3dsink \n")
    else:
        print("Creating EGLSink \n")
        sink = Gst.ElementFactory.make("fakesink", "fakesink")
        if not sink:
            sys.stderr.write(" Unable to create egl sink \n")

    streammux.set_property('width', 3264)
    streammux.set_property('height', 2464)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "configs/pgie_yolov8.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ",
              number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", 3264)
    tiler.set_property("height", 2464)

    sink.set_property("sync", 0)
    sink.set_property("qos", 0)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    pipeline.add(sink)

    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(sink)

    # create an event loop and feed GStreamer bus messages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    gsv = GenerateSendVideo()
    gsv.start()

    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)
        GLib.timeout_add(5000, perf_data.perf_print_callback)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args[:]):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        signal_handler(signal.SIGINT, None)

    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
These are the 6 available sensor modes:
GST_ARGUS: 3264 x 2464 FR = 21.000000 fps Duration = 47619048 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 3264 x 1848 FR = 28.000001 fps Duration = 35714284 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1920 x 1080 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1640 x 1232 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1280 x 720 FR = 59.999999 fps Duration = 16666667 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1280 x 720 FR = 120.000005 fps Duration = 8333333 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
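For reference, this is how I read the mapping between each sensor mode and a matching caps string in the same form as the one I use in the per-camera loop above. The MODE_CAPS dict below is only illustrative (it is not part of my pipeline); the width, height, and framerate values come from the GST_ARGUS table, with framerates rounded to whole fps:

# Illustrative only: sensor mode index -> caps string matching the GST_ARGUS table above,
# written in the same form as the capsfilter string used in the per-camera loop.
MODE_CAPS = {
    0: "video/x-raw(memory:NVMM), width=3264, height=2464, framerate=21/1, format=NV12",
    1: "video/x-raw(memory:NVMM), width=3264, height=1848, framerate=28/1, format=NV12",
    2: "video/x-raw(memory:NVMM), width=1920, height=1080, framerate=30/1, format=NV12",
    3: "video/x-raw(memory:NVMM), width=1640, height=1232, framerate=30/1, format=NV12",
    4: "video/x-raw(memory:NVMM), width=1280, height=720, framerate=60/1, format=NV12",
    5: "video/x-raw(memory:NVMM), width=1280, height=720, framerate=120/1, format=NV12",
}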
When I set the sensor mode to 2, 3, 4, or 5 using the following code, it works fine:
source.set_property('sensor_mode',2)
When I try to run it with sensor mode 0 or 1, it throws the following error:
GST_ARGUS: Creating output stream
GST_ARGUS: Creating output stream
CONSUMER: Waiting until producer is connected...
GST_ARGUS: Available Sensor modes :
GST_ARGUS: 3264 x 2464 FR = 21.000000 fps Duration = 47619048 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 3264 x 1848 FR = 28.000001 fps Duration = 35714284 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1920 x 1080 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1640 x 1232 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1280 x 720 FR = 59.999999 fps Duration = 16666667 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1280 x 720 FR = 120.000005 fps Duration = 8333333 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
ARGUS_ERROR: Error generated. /dvs/git/dirty/git-master_linux/multimedia/nvgstreamer/gst-nvarguscamera/gstnvarguscamerasrc.cpp, execute: 880 Frame Rate specified is greater than supported
GST_ARGUS: Running with following settings:
Camera index = 0
Camera mode = 0
Output Stream W = 3264 H = 2464
seconds to Run = 0
Frame Rate = 21.000000
GST_ARGUS: Setup Complete, Starting captures for 0 seconds
GST_ARGUS: Starting repeat capture requests.
CONSUMER: Waiting until producer is connected...
GST_ARGUS: Available Sensor modes :
GST_ARGUS: 3264 x 2464 FR = 21.000000 fps Duration = 47619048 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 3264 x 1848 FR = 28.000001 fps Duration = 35714284 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1920 x 1080 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1640 x 1232 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1280 x 720 FR = 59.999999 fps Duration = 16666667 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1280 x 720 FR = 120.000005 fps Duration = 8333333 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
ARGUS_ERROR: Error generated. /dvs/git/dirty/git-master_linux/multimedia/nvgstreamer/gst-nvarguscamera/gstnvarguscamerasrc.cpp, execute: 880 Frame Rate specified is greater than supported
GST_ARGUS: Running with following settings:
Camera index = 1
Camera mode = 0
Output Stream W = 3264 H = 2464
seconds to Run = 0
Frame Rate = 21.000000
GST_ARGUS: Setup Complete, Starting captures for 0 seconds
GST_ARGUS: Starting repeat capture requests.
CONSUMER: Producer has connected; continuing.
ARGUS_ERROR: Error generated. /dvs/git/dirty/git-master_linux/multimedia/nvgstreamer/gst-nvarguscamera/gstnvarguscamerasrc.cpp, execute: 1100 InvalidState.
GST_ARGUS: Cleaning up
nvbuf_utils: dmabuf_fd -1 mapped entry NOT found
nvbuf_utils: Can not get HW buffer from FD... Exiting...
CONSUMER: ERROR OCCURRED
Error: NvArgusCameraSrc: CANCELLED (7) Argus Error Status
Stopping pipeline...
CONSUMER: Producer has connected; continuing.
ARGUS_ERROR: Error generated. /dvs/git/dirty/git-master_linux/multimedia/nvgstreamer/gst-nvarguscamera/gstnvarguscamerasrc.cpp, execute: 1100 InvalidState.
GST_ARGUS: Cleaning up
nvbuf_utils: dmabuf_fd -1 mapped entry NOT found
nvbuf_utils: Can not get HW buffer from FD... Exiting...
CONSUMER: ERROR OCCURRED
Pipeline stopped.
My question: How can I run this pipeline at 3264 x 2464?
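In case it helps with isolating the issue, below is a trimmed, single-camera sketch of the same source setup (nvarguscamerasrc with the same properties and caps string as in the loop above, linked through the capsfilter into a fakesink). The element names and the run_single_camera helper are only illustrative and are not part of the actual pipeline:

#!/usr/bin/env python3
# Minimal single-camera sketch of the source setup used in the full pipeline above.
# Illustrative only, for isolating the sensor-mode issue: nvarguscamerasrc -> capsfilter -> fakesink.
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib


def run_single_camera(sensor_id=0, sensor_mode=0):
    Gst.init(None)
    pipeline = Gst.Pipeline()

    # Same properties as in the per-camera loop of the full pipeline.
    source = Gst.ElementFactory.make("nvarguscamerasrc", "test-source")
    source.set_property('sensor-id', sensor_id)
    source.set_property('bufapi-version', True)
    source.set_property('sensor_mode', sensor_mode)

    # Same caps string as in the full pipeline (mode 0: 3264x2464 @ 21 fps).
    caps = Gst.ElementFactory.make("capsfilter", "test-caps")
    caps.set_property("caps", Gst.Caps.from_string(
        "video/x-raw(memory:NVMM), width=3264, height=2464, framerate=21/1, format=NV12"))

    sink = Gst.ElementFactory.make("fakesink", "test-sink")
    sink.set_property("sync", 0)

    for element in (source, caps, sink):
        pipeline.add(element)
    source.link(caps)
    caps.link(sink)

    # Quit the loop on EOS or error so the script terminates cleanly.
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect(
        "message",
        lambda b, msg, l: l.quit()
        if msg.type in (Gst.MessageType.EOS, Gst.MessageType.ERROR) else None,
        loop)

    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        pass
    pipeline.set_state(Gst.State.NULL)


if __name__ == '__main__':
    run_single_camera(sensor_id=0, sensor_mode=0)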