OpenCV VideoWriter to GStreamer appsrc

And here is how to stream both cameras stitched together as a bash script:

# Composite two CSI cameras with nvcompositor: sink_0 at (0,0) and sink_1 at
# (0,720), each 1280x720 @ 60 fps, so the output is a vertical 1280x1440 stack.
# The composited video is H.264-encoded (nvv4l2h264enc) and streamed as RTP
# over UDP to 192.168.0.106:5000.
# NOTE: comments cannot be placed between the backslash-continued lines below.
gst-launch-1.0 nvcompositor name=comp \
  sink_0::xpos=0 sink_0::ypos=0 sink_0::width=1280 sink_0::height=720 \
  sink_1::xpos=0 sink_1::ypos=720 sink_1::width=1280 sink_1::height=720 ! \
nvvidconv ! \
nvv4l2h264enc bitrate=16000000 insert-sps-pps=true ! \
rtph264pay mtu=1400 ! \
udpsink host=192.168.0.106 port=5000  \
nvarguscamerasrc sensor-id=0 sensor-mode=3 ! \
'video/x-raw(memory:NVMM),
  width=1280,
  height=720,
  format=NV12,
  framerate=60/1' ! \
nvvidconv flip-method=0 ! \
comp.sink_0 \
nvarguscamerasrc sensor-id=1 sensor-mode=3 ! \
  'video/x-raw(memory:NVMM),
  width=1280,
  height=720,
  format=NV12,
  framerate=60/1' ! \
nvvidconv flip-method=0 ! \
comp.sink_1

receive with

# Receive the RTP/H.264 stream on UDP port 5000, depayload, decode, and display.
# FIX: the original snippet was missing both the gst-launch-1.0 invocation and
# the udpsrc source element, so it could not run as written.
gst-launch-1.0 udpsrc port=5000 ! \
application/x-rtp,encoding-name=H264,payload=96 ! \
rtph264depay ! \
h264parse ! \
queue ! \
avdec_h264 ! \
autovideosink sync=false async=false -e
1 Like

Hello @thompson.reed, thanks for the base example for streaming. What I am trying to do is stream processed videos from an RTSP link.

I can display my processed images in NumPy array format, but I could not stream them over the network. When I try your example like:

import cv2
import threading
import numpy as np

# gstreamer_pipeline returns a GStreamer pipeline string for capturing from the CSI camera.
# Flip the image by setting the flip_method (most common values: 0 and 2).
# Note: the width, height, and flip_method are hard-coded inside gstreamer_pipeline() below.

# NOTE(review): module-level camera handle; appears unused in this script.
cam = None

class CSI_Camera:
    """Threaded wrapper around a cv2.VideoCapture opened from a GStreamer
    pipeline string.

    A background thread continuously grabs frames so that read() always
    returns the most recent frame without blocking on the capture.
    """

    def __init__(self):
        # OpenCV video capture element (None until open() succeeds)
        self.video_capture = None
        # The last captured image from the camera
        self.frame = None
        # Whether the last capture attempt succeeded
        self.grabbed = False
        # The thread where the video capture runs
        self.read_thread = None
        # Protects frame/grabbed, which are shared with the reader thread
        self.read_lock = threading.Lock()
        # Flag polled by the reader thread; cleared to make it exit
        self.running = False

    def open(self, gstreamer_pipeline_string):
        """Open the capture from a GStreamer pipeline string and grab the
        first frame. On failure, video_capture is left as None."""
        try:
            self.video_capture = cv2.VideoCapture(
                gstreamer_pipeline_string, cv2.CAP_GSTREAMER
            )
        except RuntimeError:
            self.video_capture = None
            print("Unable to open camera")
            print("Pipeline: " + gstreamer_pipeline_string)
            return
        # Grab the first frame to start the video capturing
        self.grabbed, self.frame = self.video_capture.read()

    def start(self):
        """Start the background reader thread.

        Returns self so calls can be chained, or None if already running.
        """
        if self.running:
            print('Video capturing is already running')
            return None
        # Only start the thread if the capture was opened successfully
        if self.video_capture is not None:
            self.running = True
            self.read_thread = threading.Thread(target=self.updateCamera)
            # Daemon thread so a forgotten stop() cannot keep the process alive
            self.read_thread.daemon = True
            self.read_thread.start()
        return self

    def stop(self):
        """Signal the reader thread to exit and wait for it to finish."""
        self.running = False
        # BUG FIX: guard against stop() being called before start();
        # joining a None thread raised AttributeError.
        if self.read_thread is not None:
            self.read_thread.join()
            self.read_thread = None

    def updateCamera(self):
        # Reader-thread loop: keep the latest frame available under the lock.
        while self.running:
            try:
                grabbed, frame = self.video_capture.read()
                with self.read_lock:
                    self.grabbed = grabbed
                    self.frame = frame
            except RuntimeError:
                print("Could not read image from camera")

    def read(self):
        """Return (grabbed, frame) for the most recent frame.

        The frame is copied under the lock so the caller may mutate it freely.
        """
        with self.read_lock:
            # BUG FIX: guard against read() before the first frame arrives;
            # previously this crashed with AttributeError on None.copy().
            frame = self.frame.copy() if self.frame is not None else None
            grabbed = self.grabbed
        return grabbed, frame

    def release(self):
        """Release the capture and shut down the reader thread."""
        if self.video_capture is not None:
            self.video_capture.release()
            self.video_capture = None
        # BUG FIX: the thread must be told to stop before join(); previously
        # release() without a prior stop() blocked forever on a live thread.
        self.running = False
        if self.read_thread is not None:
            self.read_thread.join()
            self.read_thread = None


# Currently there are issues setting the frame rate on the CSI camera on the Nano through GStreamer.
# Here we directly select sensor_mode 3 (1280x720, 59.9999 fps)
def gstreamer_pipeline():
    """Build the GStreamer capture pipeline string for CSI sensor 1.

    Captures 1280x720 NV12 @ 60 fps from nvarguscamerasrc (sensor_mode 3),
    converts to BGR on the CPU, and terminates in an appsink so OpenCV's
    VideoCapture can pull frames.
    """
    stages = [
        "nvarguscamerasrc sensor-id=1 sensor-mode=3",
        # Caps on NVMM memory; the extra spacing is harmless to the parser.
        "video/x-raw(memory:NVMM), "
        "  width=1280, "
        "  height=720, "
        "  format=NV12, "
        "  framerate=60/1",
        "nvvidconv flip-method=0",
        "videoconvert",
        "video/x-raw, format=BGR",
        " appsink",
    ]
    return " ! ".join(stages)

def gstreamer_pipeline_out():
    """Build the GStreamer output pipeline string for cv2.VideoWriter.

    Takes BGR frames from appsrc, converts to BGRx for the hardware path,
    H.264-encodes with omxh264enc, and sends RTP over UDP to
    192.168.0.110:5000.

    NOTE(review): appsrc may additionally need is-live=true and explicit
    width/height/framerate caps to negotiate on some setups — confirm on
    the target device.
    """
    stages = (
        "appsrc",
        "video/x-raw, format=BGR",
        "queue",
        "videoconvert",
        "video/x-raw, format=BGRx",
        "nvvidconv",
        "omxh264enc",
        "video/x-h264, stream-format=byte-stream",
        "h264parse",
        "rtph264pay pt=96 config-interval=1",
        "udpsink host=192.168.0.110 port=5000",
    )
    return " ! ".join(stages)

def start_cameras():
    """Read frames from a local video file, blur them, and push them into a
    GStreamer appsrc pipeline (H.264 over RTP/UDP) via cv2.VideoWriter.

    Raises SystemExit if the input file or the output pipeline cannot be
    opened.
    """
    cap_vid = cv2.VideoCapture("/home/nvidia/Downloads/s1.mp4")
    if not cap_vid.isOpened():
        print('VideoCapture not opened')
        raise SystemExit(1)

    # Explicit CAP_GSTREAMER apiPreference so the pipeline string is handed
    # to the GStreamer backend; fourcc=0, 60 fps, 1280x720 BGR frames.
    out = cv2.VideoWriter(
        gstreamer_pipeline_out(), cv2.CAP_GSTREAMER, 0, 60.0, (1280, 720)
    )

    # BUG FIX: the original looped forever printing "VideoWriter not opened"
    # because SystemExit(0) only constructed the exception without raising it.
    if not out.isOpened():
        print('VideoWriter not opened')
        cap_vid.release()
        raise SystemExit(1)

    try:
        while True:
            ret, frame_vid = cap_vid.read()
            # BUG FIX: stop at end-of-stream instead of crashing inside
            # cv2.blur when read() returns (False, None).
            if not ret:
                break
            img = cv2.blur(frame_vid, (3, 3))
            out.write(img)
    finally:
        # BUG FIX: release the capture and flush/close the writer so the
        # GStreamer pipeline gets a proper EOS.
        cap_vid.release()
        out.release()

if __name__ == "__main__":
    start_cameras()

But in the end I cannot receive the stream.

Best Regards