how to use opencv to process image frame( format:array) → input deepstream pipeline → output rtsp stream
my code below:
def main(args):
# Standard GStreamer initialization
GObject.threads_init()
Gst.init(None)
# Create gstreamer elements */
# Create Pipeline element that will form a connection of other elements
print("Creating Pipeline \n ")
pipeline = Gst.Pipeline()
if not pipeline:
sys.stderr.write(" Unable to create Pipeline \n")
print("Creating streamux \n ")
# Create nvstreammux instance to form batches from one or more sources.
streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
if not streammux:
sys.stderr.write(" Unable to create NvStreamMux \n")
pipeline.add(streammux)
for i in range(number_sources):
print("Creating source_bin ",i," \n ")
appsource = Gst.ElementFactory.make("appsrc", "numpy-source")
padname="sink_%u" %i
sinkpad= streammux.get_request_pad(padname)
if not sinkpad:
sys.stderr.write("Unable to create sink pad bin \n")
print("Creating Pgie \n ")
pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
if not pgie:
sys.stderr.write(" Unable to create pgie \n")
print("Creating tiler \n ")
tiler=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
if not tiler:
sys.stderr.write(" Unable to create tiler \n")
print("Creating nvvidconv \n ")
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
if not nvvidconv:
sys.stderr.write(" Unable to create nvvidconv \n")
print("Creating nvosd \n ")
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
if not nvosd:
sys.stderr.write(" Unable to create nvosd \n")
nvosd.set_property('process-mode',OSD_PROCESS_MODE)
nvosd.set_property('display-text',OSD_DISPLAY_TEXT)
if(is_aarch64()):
print("Creating transform \n ")
transform=Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
if not transform:
sys.stderr.write(" Unable to create transform \n")
transform=Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
print("Creating EGLSink \n")
sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
#===========rtsp=============#
nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "nv-videoconv")
if not nvvidconv_postosd:
sys.stderr.write(" Unable to create nvvidconv_postosd \n")
caps = Gst.ElementFactory.make("capsfilter", "filter")
caps_in=Gst.Caps.from_string("video/x-raw,format=RGBA,width=640,height=480,framerate=30/1")
appsource.set_property('caps',caps_in)
caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=NV12,width=640,height=480,framerate=30/1"))
encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
encoder.set_property('bitrate', 4000000)
encoder.set_property('preset-level', 1)
encoder.set_property('insert-sps-pps', 1)
encoder.set_property('bufapi-version', 1)
rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
updsink_port_num = 5400
udpsink = Gst.ElementFactory.make("udpsink", "udpsink")
udpsink.set_property('host', '224.224.255.255')
udpsink.set_property('port', updsink_port_num)
udpsink.set_property('async', False)
udpsink.set_property('sync', 1)
if not sink:
sys.stderr.write(" Unable to create egl sink \n")
if is_live:
print("Atleast one of the sources is live")
streammux.set_property('live-source', 1)
streammux.set_property('width', 640)
streammux.set_property('height', 480)
streammux.set_property('batch-size', number_sources)
streammux.set_property('batched-push-timeout', 4000000)
pgie.set_property('config-file-path', "dstest3_pgie_config.txt")
pgie_batch_size=pgie.get_property("batch-size")
if(pgie_batch_size != number_sources):
print("WARNING: Overriding infer-config batch-size",pgie_batch_size," with number of sources ", number_sources," \n")
pgie.set_property("batch-size",number_sources)
tiler_rows=int(math.sqrt(number_sources))
tiler_columns=int(math.ceil((1.0*number_sources)/tiler_rows))
tiler.set_property("rows",tiler_rows)
tiler.set_property("columns",tiler_columns)
tiler.set_property("width", TILED_OUTPUT_WIDTH)
tiler.set_property("height", TILED_OUTPUT_HEIGHT)
sink.set_property("qos",0)
print("Adding elements to Pipeline \n")
pipeline.add(appsource)
pipeline.add(pgie)
pipeline.add(tiler)
pipeline.add(nvvidconv)
pipeline.add(nvosd)
pipeline.add(nvvidconv_postosd)#rtsp
pipeline.add(caps)
pipeline.add(encoder)
appsource.link(nvvidconv_postosd)
nvvidconv_postosd.link(caps)
#caps.link(transform)
#transform.link(sink)
caps.link(encoder)
encoder.link(rtppay)
rtppay.link(udpsink)
# create an event loop and feed gstreamer bus mesages to it
loop = GObject.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect ("message", bus_call, loop)
tiler_src_pad=pgie.get_static_pad("src")
#if not tiler_src_pad:
#sys.stderr.write(" Unable to get src pad \n")
#else:
#tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0)
rtsp_port_num = 8554
server = GstRtspServer.RTSPServer.new()
server.props.service = "%d" % rtsp_port_num
server.attach(None)
factory = GstRtspServer.RTSPMediaFactory.new()
factory.set_launch( "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )" % (updsink_port_num, "H264"))
factory.set_shared(True)
server.get_mount_points().add_factory("/ds-test", factory)
print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n" % rtsp_port_num)
#osdsinkpad = nvosd.get_static_pad("udpsink")
#osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
pipeline.set_state(Gst.State.PLAYING)
for _ in range(5):
arr=np.random.randint(low=0,high=255,size=(480,640,3),dtype=np.uint8)
img=cv2.cvtColor(arr, cv2.COLOR_BGR2RGBA)
appsource.emit("push-buffer",ndarray_to_gst_buffer(img))
time.sleep(1)
appsource.emit("end-of-stream")
try:
loop.run()
except:
pass
# cleanup
print("Exiting app\n")
pipeline.set_state(Gst.State.NULL)
if name == ‘main’:
sys.exit(main(sys.argv))
I try img into buffer , and hope output rtsp stream , and use another camera catch rtsp stream to show image
I run the result below:
jetson2021030510@jetson2021030510:/opt/nvidia/deepstream/deepstream-5.0/sources/deepstream_python_apps/apps/deepstream-test3$ python3 deepstream_test_3_rtsp.py ‘rtsp://admin:abc541287@192.168.0.106:554/cam/realmonitor?channel=1&subtype=0&unicast=true&proto=Onvif’
deepstream_test_3_rtsp.py:30: PyGIWarning: GstRtspServer was imported without specifying a version first. Use gi.require_version(‘GstRtspServer’, ‘1.0’) before import to ensure that the right version gets loaded.
from gi.repository import GObject, Gst, GstRtspServer
2021-09-03 12:11:47.787169: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.2
Creating Pipeline
Creating streamux
Creating source_bin 0
Creating Pgie
Creating tiler
Creating nvvidconv
Creating nvosd
Creating transform
Creating EGLSink
Atleast one of the sources is live
Adding elements to Pipeline
Linking elements in the Pipeline
*** DeepStream: Launched RTSP Streaming at rtsp://localhost:8554/ds-test ***
Now playing…
Starting pipeline
Opening in BLOCKING MODE
NvMMLiteOpen : Block : BlockType = 4
===== NVMEDIA: NVENC =====
NvMMLiteBlockCreate : Block : BlockType = 4
0:00:14.312013008 16322 0x392730c0 INFO nvinfer gstnvinfer.cpp:619:gst_nvinfer_logger: NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1701> [UID = 1]: deserialized trt engine from :/opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_int8.engine
INFO: [Implicit Engine Info]: layers num: 3
0 INPUT kFLOAT input_1 3x368x640
1 OUTPUT kFLOAT conv2d_bbox 16x23x40
2 OUTPUT kFLOAT conv2d_cov/Sigmoid 4x23x40
0:00:14.312475642 16322 0x392730c0 INFO nvinfer gstnvinfer.cpp:619:gst_nvinfer_logger: NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:1805> [UID = 1]: Use deserialized engine model: /opt/nvidia/deepstream/deepstream-5.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_int8.engine
0:00:14.370511384 16322 0x392730c0 INFO nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus: [UID 1]: Load new model:dstest3_pgie_config.txt sucessfully
H264: Profile = 66, Level = 0
(python3:16322): GStreamer-CRITICAL **: 12:12:00.599: gst_segment_to_running_time: assertion ‘segment->format == format’ failed
(python3:16322): GStreamer-CRITICAL **: 12:12:00.599: gst_segment_to_running_time: assertion ‘segment->format == format’ failed
(python3:16322): GStreamer-CRITICAL **: 12:12:00.600: gst_segment_to_running_time: assertion ‘segment->format == format’ failed
(python3:16322): GStreamer-CRITICAL **: 12:12:00.601: gst_segment_to_running_time: assertion ‘segment->format == format’ failed
(python3:16322): GStreamer-CRITICAL **: 12:12:00.602: gst_segment_to_running_time: assertion ‘segment->format == format’ failed
(python3:16322): GStreamer-CRITICAL **: 12:12:01.569: gst_segment_to_running_time: assertion ‘segment->format == format’ failed
(python3:16322): GStreamer-CRITICAL **: 12:12:02.576: gst_segment_to_running_time: assertion ‘segment->format == format’ failed
(python3:16322): GStreamer-CRITICAL **: 12:12:03.584: gst_segment_to_running_time: assertion ‘segment->format == format’ failed
(python3:16322): GStreamer-CRITICAL **: 12:12:04.591: gst_segment_to_running_time: assertion ‘segment->format == format’ failed
End-of-stream
Exiting app
jetson2021030510@jetson2021030510:/opt/nvidia/deepstream/deepstream-5.0/sources/deepstream_python_apps/apps/deepstream-test3$
how to solve the problem? or what methods to arrive my target
thanks