Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU) : Jetson Orin Nano 8GB Developer kit
• DeepStream Version : DeepStream 6.3
• JetPack Version (valid for Jetson only) : JetPack SDK 5.1.2
• TensorRT Version : TensorRT 8.5.2-1+cuda11.4
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type (questions, new requirements, bugs) : Question (encoder change)
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)
• Requirement details( This is for new requirement. Including the module name-for which plugin or for which sample application, the function description)
I want to run YOLOv5 through the DeepStream Python bindings on the Orin Nano board and pass the results to an RTSP server. However, the Orin Nano has no hardware video encoder, so nvv4l2h264enc is not supported; from what I found, the software encoder x264enc should be able to take its place, but I am still getting an error. Can anyone help?
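From what I have read, x264enc is a CPU encoder, so unlike nvv4l2h264enc it only accepts system-memory video/x-raw buffers (not NVMM) and interprets its bitrate property in kbit/s rather than bit/s. My rough, unverified understanding is that the capsfilter/encoder section would then need to look something like the sketch below (the property values are only examples, and it assumes the same Gst import and setup as the full script further down):

# Unverified sketch: software H.264 encoding path (Orin Nano has no NVENC).
# Assumes nvvideoconvert copies NVMM buffers to system memory when the
# downstream caps ask for plain video/x-raw.
caps = Gst.ElementFactory.make("capsfilter", "filter")
caps.set_property("caps", Gst.Caps.from_string("video/x-raw, format=I420"))

encoder = Gst.ElementFactory.make("x264enc", "encoder")
encoder.set_property("bitrate", 4000)      # example value; x264enc takes kbit/s, not bit/s
encoder.set_property("speed-preset", 1)    # 1 = ultrafast, to keep CPU load manageable (example)
encoder.set_property("tune", 0x00000004)   # zerolatency flag, for live streaming (example)

rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
rtppay.set_property("config-interval", 1)  # resend SPS/PPS periodically for late-joining RTSP clients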
The code is as follows (this is the main() function of my script; the imports and helper functions such as create_source_bin, bus_call and tiler_src_pad_buffer_probe are defined elsewhere and omitted here).
def main(args):
    global poly, ROI_line, roi_factor, obj_counter_array, obj_ids_array, obj_counter
    global timestamp, ROI_Area, loitering_time_check, loitering_time_check_arr, frame_last_check_time, stop_key, zero_key, time_format, intrusion_start_fcheck
    # Check input arguments
    number_sources = len(args)
    # Standard GStreamer initialization
    Gst.init(None)
    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    if ts_from_rtsp:
        streammux.set_property("attach-sys-ts", 0)
    if gie == "nvinfer":
        pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    else:
        pgie = Gst.ElementFactory.make("nvinferserver", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvvidconv_postosd = Gst.ElementFactory.make(
        "nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")
    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420")
    )
    # Make the encoder
    if codec == "H264":
        # encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        encoder = Gst.ElementFactory.make("x264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("x265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property("bitrate", bitrate)
    # if is_aarch64():
    #     encoder.set_property("preset-level", 1)
    #     encoder.set_property("insert-sps-pps", 1)
    #     encoder.set_property("bufapi-version", 1)
    # Make the payload-encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")
    # Make the UDP sink
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    sink.set_property("host", "224.224.255.255")
    sink.set_property("port", updsink_port_num)
    sink.set_property("async", False)
    sink.set_property("sync", 1)
    streammux.set_property("width", 1920)
    streammux.set_property("height", 1080)
    streammux.set_property("batch-size", 1)
    streammux.set_property("batched-push-timeout", 4000000)
    pgie.set_property('config-file-path', "config_infer_primary_yoloV5.txt")
    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read('tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
        if key == 'enable-past-frame':
            tracker_enable_past_frame = config.getint('tracker', key)
            tracker.set_property('enable_past_frame', tracker_enable_past_frame)
    # tracker.set_property('tracker-width', 640)
    # tracker.set_property('tracker-height', 384)
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print(
            "WARNING: Overriding infer-config batch-size",
            pgie_batch_size,
            " with number of sources ",
            number_sources,
            " \n",
        )
        pgie.set_property("batch-size", number_sources)
    print("Adding elements to Pipeline \n")
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("qos", 0)
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(tracker)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(nvvidconv)
    nvvidconv.link(tiler)
    tiler.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)
    # create an event loop and feed gstreamer bus messages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0)
    # Start streaming
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        '( udpsrc name=pay0 port=%d buffer-size=524288 caps="application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 " )'
        % (updsink_port_num, codec)
    )
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    print(
        "\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n"
        % rtsp_port_num
    )
    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except BaseException:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
P.S. The code above currently runs, but no video plays when I open the RTSP server address in a player.
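To narrow down where things stop, I am also thinking of adding a probe like the one below on the payloader's sink pad to check whether encoded buffers are arriving there at all before suspecting the RTSP side (this is only a debugging idea; the names are for illustration):

# Hypothetical debugging probe: count buffers reaching the RTP payloader,
# to see whether the encoder is producing data at all.
def rtppay_buffer_probe(pad, info, counter):
    counter[0] += 1
    if counter[0] % 100 == 0:
        print("rtppay has received", counter[0], "buffers")
    return Gst.PadProbeReturn.OK

buffer_count = [0]
rtppay.get_static_pad("sink").add_probe(Gst.PadProbeType.BUFFER, rtppay_buffer_probe, buffer_count)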

