How to save video from 2 Pi cameras with a Jetson Nano

Hello,

I have the code from JetsonHacks, and for a week I have been trying to modify it to save the video as MP4 from 2 Pi cameras. Does anyone know how to do this?

import cv2
import threading
import io
import subprocess
import os.path
import shlex
import time

from multiprocessing import Queue
import sys

import numpy as np # Make sure NumPy is loaded before it is used in the callback
assert np  # avoid "imported but unused" message (W0611)

# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
# Flip the image by setting the flip_method (most common values: 0 and 2)
# display_width and display_height determine the size of each camera pane in the window on the screen

front_camera = None
back_camera = None

class CSI_Camera:

    def __init__(self):
        # Initialize instance variables
        # OpenCV video capture element
        self.video_capture = None
        # The last captured image from the camera
        self.frame = None
        self.grabbed = False
        # The thread where the video capture runs
        self.read_thread = None
        self.read_lock = threading.Lock()
        self.running = False

    def open(self, gstreamer_pipeline_string):
        try:
            self.video_capture = cv2.VideoCapture(
                gstreamer_pipeline_string, cv2.CAP_GSTREAMER
            )
        except RuntimeError:
            self.video_capture = None
            print("Unable to open camera")
            print("Pipeline: " + gstreamer_pipeline_string)
            return
        # Grab the first frame to start the video capturing
        self.grabbed, self.frame = self.video_capture.read()

    def start(self):
        if self.running:
            print("Video capturing is already running")
            return None
        # Create a thread to read the camera image
        if self.video_capture is not None:
            self.running = True
            self.read_thread = threading.Thread(target=self.updateCamera)
            self.read_thread.start()
        return self

    def stop(self):
        self.running = False
        self.read_thread.join()

    def updateCamera(self):
        # This is the thread to read images from the camera
        while self.running:
            try:
                grabbed, frame = self.video_capture.read()
                with self.read_lock:
                    self.grabbed = grabbed
                    self.frame = frame
            except RuntimeError:
                print("Could not read image from camera")
        # FIX ME - stop and cleanup thread
        # Something bad happened

    def read(self):
        with self.read_lock:
            frame = self.frame.copy()
            grabbed = self.grabbed
        return grabbed, frame

    def release(self):
        if self.video_capture is not None:
            self.video_capture.release()
            self.video_capture = None
        # Now kill the thread
        if self.read_thread is not None:
            self.read_thread.join()

# Currently the frame rate of the CSI camera on the Nano is set through the GStreamer pipeline.
# Here we directly select sensor_mode 3 (1280x720, 59.9999 fps).

def gstreamer_pipeline(
    sensor_id=0,
    sensor_mode=3,
    capture_width=1920,
    capture_height=1080,
    display_width=480,
    display_height=2 * 720,
    framerate=30,
    flip_method=0,
):
    return (
        "nvarguscamerasrc sensor-id=%d sensor-mode=%d ! "
        "video/x-raw(memory:NVMM), "
        "width=(int)%d, height=(int)%d, "
        "format=(string)NV12, framerate=(fraction)%d/1 ! "
        "nvvidconv flip-method=%d ! "
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
        "videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink"
        % (
            sensor_id,
            sensor_mode,
            capture_width,
            capture_height,
            framerate,
            flip_method,
            display_width,
            display_height,
        )
    )

def start_cameras():

    # Front camera parameters
    front_camera = CSI_Camera()
    front_camera.open(
        gstreamer_pipeline(
            framerate=30,
            sensor_id=1,
            sensor_mode=3,
            capture_width=1920,
            capture_height=1080,
            flip_method=2,  # rotates the image by 180 degrees
            display_height=480,
            display_width=720,
        )
    )
    front_camera.start()

    # Rear camera parameters
    back_camera = CSI_Camera()
    back_camera.open(
        gstreamer_pipeline(
            sensor_id=0,
            sensor_mode=3,
            capture_width=1920,
            capture_height=1080,
            flip_method=0,
            display_height=480,
            display_width=720,
        )
    )
    back_camera.start()

    cv2.namedWindow("CSI Cameras", cv2.WINDOW_AUTOSIZE)

    if (
        not front_camera.video_capture.isOpened()
        or not back_camera.video_capture.isOpened()
    ):
        # Cameras did not open, or no camera attached
        print("Unable to open any cameras")
        # TODO: Proper Cleanup
        raise SystemExit(0)

    while cv2.getWindowProperty("CSI Cameras", 0) >= 0:
        # Read the front camera
        _, front_image = front_camera.read()
        # Read the rear camera
        _, back_image = back_camera.read()
        camera_images = np.hstack((front_image, back_image))
        cv2.imshow("CSI Cameras", camera_images)
        # This also acts as a frame delay (30 ms)
        keyCode = cv2.waitKey(30) & 0xFF
        # Stop the program on the ESC key
        if keyCode == 27:
            break

    front_camera.stop()
    front_camera.release()
    back_camera.stop()
    back_camera.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    start_cameras()

Hello sugene14,

That is sample code for composing a GStreamer pipeline.
You may also give it a quick try: please refer to the command below, which enables the camera with nvarguscamerasrc and captures a video stream for 300 frames.
For example:
$ gst-launch-1.0 nvarguscamerasrc sensor-id=0 num-buffers=300 ! 'video/x-raw(memory:NVMM), width=1280, height=720, framerate=30/1' ! nvtee ! omxh264enc bitrate=20000000 ! qtmux ! filesink location=video.mp4
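
If you would rather stay with the OpenCV code above and save each camera to its own MP4 file, one possible approach is to open a cv2.VideoWriter with an appsrc GStreamer pipeline per camera. The following is an untested sketch, assuming OpenCV was built with GStreamer support and that the omxh264enc element from the command above is available on your JetPack release (newer releases use nvv4l2h264enc instead); the helper name mp4_writer, the bitrate value, and the file names front.mp4 / back.mp4 are only placeholders.

import cv2  # already imported in the script above

def mp4_writer(filename, width=720, height=480, fps=30):
    # appsrc receives BGR frames from OpenCV, nvvidconv copies them into
    # NVMM memory for the hardware H.264 encoder, and qtmux wraps the
    # encoded stream in an MP4 container.
    gst_out = (
        "appsrc ! video/x-raw, format=(string)BGR ! queue ! "
        "videoconvert ! video/x-raw, format=(string)BGRx ! "
        "nvvidconv ! video/x-raw(memory:NVMM), format=(string)I420 ! "
        "omxh264enc bitrate=8000000 ! qtmux ! filesink location=%s"
        % filename
    )
    # fourcc is 0 because the pipeline string already selects the encoder;
    # (width, height) must match the frames being written (here 720x480,
    # the display_width x display_height used in start_cameras()).
    return cv2.VideoWriter(
        gst_out, cv2.CAP_GSTREAMER, 0, float(fps), (width, height), True
    )

In start_cameras() you would then create front_writer = mp4_writer("front.mp4") and back_writer = mp4_writer("back.mp4") after the two cameras are opened, call front_writer.write(front_image) and back_writer.write(back_image) inside the display loop right after the read() calls, and call release() on both writers before cv2.destroyAllWindows() so that qtmux can finalize the MP4 files.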

Please refer to the developer guide for the documentation; you may check the Multimedia chapter for Accelerated GStreamer.
There are also some Multimedia API Sample Applications for your reference.
Thanks