Implementing OpenCV in Python 3 GStreamer code

I’m currently working on a fire-detection project that uses a FLIR Hadron camera together with a Jetson Nano. My aim is to display, save, and stream the camera data. Initially I attempted this with a combination of GStreamer and OpenCV in Python 3, but I consistently lost RGB frames while simply reading from the camera with cap.read() in OpenCV.
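For reference, the cap.read() approach looked roughly like this (a minimal sketch, assuming the same 1080p30 /dev/video0 source as the pipelines below; drop=true and max-buffers=1 are standard appsink properties that discard stale frames instead of queueing them):

import cv2

# Capture straight from the camera via a GStreamer pipeline string.
# This requires an OpenCV build with GStreamer support, which I believe
# the stock Jetson builds have.
gst_cap = (
    "v4l2src device=/dev/video0 ! video/x-raw,width=1920,height=1080,framerate=30/1 "
    "! videoconvert ! video/x-raw,format=BGR ! appsink drop=true max-buffers=1"
)
cap = cv2.VideoCapture(gst_cap, cv2.CAP_GSTREAMER)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:  # no frame available / stream ended
        break
    # frame is a BGR numpy array, ready for OpenCV processing
    cv2.imshow('RGB', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()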

To address this, I wrote working code (the first code below) that streams the data to a server without relying on OpenCV. In the second code, I use an appsink to receive the data so it can then be processed with OpenCV for display and storage. However, I ran into glitches in both the display and the stream due to frame loss, and the saved videos played back accelerated (presumably because the writer is told to expect 30 fps but receives fewer frames than that).

I would greatly appreciate any advice or insights on how to mitigate these issues. Thank you for your assistance.

The first code:

import gi
from time import sleep
from datetime import datetime

gi.require_version("Gst", "1.0")
# gi.require_version("GstApp", "1.0")

from gi.repository import Gst #, GstApp

current_datetime = datetime.now()
dt = current_datetime.strftime("%Y-%m-%d_%H-%M-%S")
print(dt)

Gst.init(None)

pipeRGB = (
    # 1080p30 raw video from the camera, split into two branches with a tee
    "v4l2src io-mode=4 device=/dev/video0 ! video/x-raw, width=1920, height=1080, framerate=30/1 ! tee name=t "
    # branch 1: hardware H.264 encode, FLV mux, RTMP stream (placeholder URL)
    "t. ! queue ! nvvidconv ! nvv4l2h264enc ! h264parse ! flvmux ! rtmpsink location='www.google.com live=1' "
    # branch 2: rate-limited (one frame every 30 s) BGR frames into an appsink
    "t. ! queue ! videorate ! video/x-raw, width=1920, height=1080, framerate=1/30 ! queue ! videoconvert ! video/x-raw,format=BGR ! appsink name=sinkRGB"
)



pipelineRGB = Gst.parse_launch(pipeRGB)
print("Parsing RGB Pipeline")
pipelineRGB.set_state(Gst.State.PLAYING)
print("RGB Pipeline playing")

try:
    while True:
        sleep(1)
except KeyboardInterrupt:
    print("""\r\n
    ============================
    Keyboard Interrupt received
    ============================
    Closing GStreamer Pipeline""")
    pipelineRGB.set_state(Gst.State.NULL)

print("Finished!")

The second code:

import gi
import cv2
import numpy as np
import time
from datetime import datetime


gi.require_version("Gst", "1.0")
from gi.repository import Gst, GLib

current_datetime = datetime.now()
dt = current_datetime.strftime("%Y-%m-%d_%H-%M-%S")
print(dt)

Gst.init(None)

pipeRGB = (
    # same source and RTMP streaming branch as in the first code
    "v4l2src io-mode=4 device=/dev/video0 ! video/x-raw, width=1920, height=1080, framerate=30/1 ! tee name=t "
    "t. ! queue ! nvvidconv ! nvv4l2h264enc ! h264parse ! flvmux ! rtmpsink location='www.google.com live=1' "
    # full-rate BGR frames into the appsink for OpenCV
    "t. ! queue ! videoconvert ! video/x-raw,format=BGR ! appsink name=sinkRGB"
)

pipelineRGB = Gst.parse_launch(pipeRGB)
appsinkRGB = pipelineRGB.get_by_name("sinkRGB")

def on_new_sample(sink):
    # pull-sample blocks until a buffer is available (or the pipeline stops)
    sample = sink.emit("pull-sample")
    if sample is None:  # EOS or pipeline shutting down
        return None
    buf = sample.get_buffer()
    caps = sample.get_caps()
    # wrap the raw BGR bytes in a (height, width, 3) numpy array
    array = np.ndarray(
        (caps.get_structure(0).get_value('height'),
         caps.get_structure(0).get_value('width'),
         3),
        buffer=buf.extract_dup(0, buf.get_size()),
        dtype=np.uint8)
    return array

# Note: this signal never actually fires because emit-signals is not enabled
# on the appsink; frames are pulled manually in the loop below instead.
appsinkRGB.connect("new-sample", on_new_sample)

actual_fps=30
actual_width=1920
actual_height=1080

date_string = time.strftime("%Y-%m-%d-%H-%M")  # avoid ':' in file names
# matroskamux writes a Matroska container, so use a .mkv extension rather than .mp4
gst_writer_str = 'appsrc ! video/x-raw,format=BGR,width=1920,height=1080,framerate=30/1 ! queue ! videoconvert ! video/x-raw,format=BGRx ! nvvidconv ! nvv4l2h264enc maxperf-enable=1 preset-level=4 control-rate=1 bitrate=8000000 ! h264parse ! matroskamux ! filesink location=RGB_' + date_string + '.mkv'
fourcc = 0  # raw input; the GStreamer pipeline does the actual encoding
out = cv2.VideoWriter(gst_writer_str, cv2.CAP_GSTREAMER, fourcc, actual_fps, (actual_width, actual_height), True)
pipelineRGB.set_state(Gst.State.PLAYING)
try:
    while True:
        frame = on_new_sample(appsinkRGB)  # blocking pull from the appsink
        if frame is None:  # EOS / pipeline stopped
            break
        out.write(frame)
        cv2.imshow('RGB Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    pipelineRGB.set_state(Gst.State.NULL)
    out.release()
    cv2.destroyAllWindows()

print("Finished!")

Note: I have to have the data processed via OpenCV, because we want to run AI on the frames in OpenCV afterwards.
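Since the frames ultimately have to land in OpenCV/numpy anyway, here is a sketch of the signal-driven appsink approach I am considering instead of the polling loop above (assuming the same pipeRGB pipeline string as in the second code; emit-signals, max-buffers, and drop are standard appsink properties, and a "new-sample" handler must return a Gst.FlowReturn):

import gi
import numpy as np
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GLib

Gst.init(None)
pipeline = Gst.parse_launch(pipeRGB)  # same pipeline string as the second code
appsink = pipeline.get_by_name("sinkRGB")
appsink.set_property("emit-signals", True)  # required for "new-sample" to fire
appsink.set_property("max-buffers", 1)      # keep only the newest frame
appsink.set_property("drop", True)          # drop older frames instead of blocking

def on_new_sample(sink):
    sample = sink.emit("pull-sample")
    if sample is None:
        return Gst.FlowReturn.ERROR
    buf = sample.get_buffer()
    s = sample.get_caps().get_structure(0)
    frame = np.ndarray(
        (s.get_value("height"), s.get_value("width"), 3),
        buffer=buf.extract_dup(0, buf.get_size()),
        dtype=np.uint8)
    # hand `frame` to OpenCV / the AI model here, e.g. out.write(frame)
    return Gst.FlowReturn.OK  # signal handlers must return a FlowReturn

appsink.connect("new-sample", on_new_sample)
pipeline.set_state(Gst.State.PLAYING)
loop = GLib.MainLoop()
try:
    loop.run()
except KeyboardInterrupt:
    pass
pipeline.set_state(Gst.State.NULL)

One caveat I'm aware of: the handler runs on a GStreamer streaming thread, so cv2.imshow should probably not be called from it directly; handing frames to the main thread through a queue seems safer.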