Video record via DS Python API

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU)
Jetson Nano 4GB
• DeepStream Version
6.0.1
• JetPack Version (valid for Jetson only)
4.6.1
• TensorRT Version
8.2.1-1+CUDA10.2

I am trying to run this code to record video, but I haven't been able to achieve my goal: the video file is created but cannot be opened because its size never grows beyond 0 bytes. The code and terminal output are below:
code:

import sys
sys.path.append('../')
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from utils.common.is_aarch_64 import is_aarch64
from utils.common.bus_call import bus_call
from utils.common.FPS import GETFPS
import pyds

fps_streams={}
sended=[]
MAX_DISPLAY_LEN = 64
MUXER_OUTPUT_WIDTH = 1280
MUXER_OUTPUT_HEIGHT = 720
MUXER_BATCH_TIMEOUT_USEC = 3400000
TILED_OUTPUT_WIDTH = 1280
TILED_OUTPUT_HEIGHT = 720
GST_CAPS_FEATURES_NVMM = "memory:NVMM"
def tiler_src_pad_buffer_probe(pad,info,u_data):

    gst_buffer = info.get_buffer()
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)

            l_obj=frame_meta.obj_meta_list
            while l_obj: 
                try: 
                    obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
                except StopIteration:
                    break
                l_tracker_object_coordinates = obj_meta.tracker_bbox_info.org_bbox_coords
                if obj_meta.obj_label == "oos" and obj_meta.object_id not in sended:
                    print("class_name",obj_meta.obj_label)
                    print("object_id",obj_meta.object_id)
                    print("top:",l_tracker_object_coordinates.top)
                    print("left:",l_tracker_object_coordinates.left)
                    print("width:",l_tracker_object_coordinates.width)
                    print("height:",l_tracker_object_coordinates.height)
                    sended.append(obj_meta.object_id)
                if len(sended) > 20:
                    del sended[0]
                try:
                    l_obj=l_obj.next
                except StopIteration:
                    break
            fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()

            l_frame=l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK    

def cb_newpad(decodebin, decoder_src_pad,data):
    print("In cb_newpad\n")
    caps=decoder_src_pad.get_current_caps()
    gststruct=caps.get_structure(0)
    gstname=gststruct.get_name()
    source_bin=data
    features=caps.get_features(0)
    print("gstname=",gstname)
    if(gstname.find("video")!=-1):
        print("features=",features)
        if features.contains("memory:NVMM"):
            bin_ghost_pad=source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")

def decodebin_child_added(child_proxy,Object,name,user_data):
    print("Decodebin child added:", name, "\n")
    if(name.find("decodebin") != -1):
        Object.connect("child-added",decodebin_child_added,user_data)
    # Object.set_property("drop-frame-interval", 0)

def create_source_bin(index,uri):
    print("Creating source bin")
    bin_name="source-bin-%02d" %index
    nbin=Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")
    uri_decode_bin=Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    uri_decode_bin.set_property("uri",uri)
    uri_decode_bin.connect("pad-added",cb_newpad,nbin)
    uri_decode_bin.connect("child-added",decodebin_child_added,nbin)
    Gst.Bin.add(nbin,uri_decode_bin)
    bin_pad=nbin.add_pad(Gst.GhostPad.new_no_target("src",Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin

def main():
    GObject.threads_init()
    Gst.init(None)
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    tiler=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    
    
    queue = Gst.ElementFactory.make("queue", "queue")
    nvvidconv2 = Gst.ElementFactory.make("nvvideoconvert", "convertor2")
    capsfilter = Gst.ElementFactory.make("capsfilter", "capsfilter")
    encoder = Gst.ElementFactory.make("avenc_mpeg4", "encoder")
    codeparser = Gst.ElementFactory.make("mpeg4videoparse", "mpeg4-parser")
    container = Gst.ElementFactory.make("qtmux", "qtmux")
    filesink = Gst.ElementFactory.make("filesink", "filesink")
    if(is_aarch64()):
        transform=Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    
    caps = Gst.Caps.from_string("video/x-raw, format=I420")
    capsfilter.set_property("caps", caps)
    
    encoder.set_property("bitrate", 2000000)
    
    nvosd.set_property('process-mode',0)
    nvosd.set_property('display-text',1)

    filesink.set_property("location", "./out.mp4")
    filesink.set_property('sync', 0)
    sink.set_property('sync', 0)
    
    tracker.set_property('tracker-width', 640)
    tracker.set_property('tracker-height', 384)
    tracker.set_property('gpu_id', 0)
    tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_nvmultiobjecttracker.so')
    tracker.set_property('ll-config-file', 'trackers/config_tracker_IOU.yml')
    tracker.set_property('enable_batch_process', 1)
    tracker.set_property('enable_past_frame', 0)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 400000)
    streammux.set_property('sync-inputs',0)
    
    pgie.set_property('config-file-path', "utils/pgie_yolov4_tiny_config.txt")  
    #pgie.set_property('config-file-path', "utils/yolotiny_infer_primary.txt")  
    pgie.set_property('batch-size',1)

    tiler.set_property("rows",1)
    tiler.set_property("columns",1)
    tiler.set_property("width", 720)
    tiler.set_property("height", 480)

    print("Adding elements to Pipeline \n")

    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(queue)
    pipeline.add(nvvidconv2)
    pipeline.add(capsfilter)
    pipeline.add(encoder)
    pipeline.add(codeparser)
    pipeline.add(container)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(filesink)

    uri_name="file:///home/tosso/Documents/tosso_koctas/utils/videos/reyon_corp.mp4"
    if uri_name.find("rtsp://") == 0 :
        streammux.set_property('live-source', 0)
    fps_streams["stream{0}".format(0)]=GETFPS(0)
    source_bin=create_source_bin(0, uri_name)
    pipeline.add(source_bin)
    padname="sink_%u" %0
    sinkpad= streammux.get_request_pad(padname)
    srcpad=source_bin.get_static_pad("src")
    srcpad.link(sinkpad)


    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(tiler)
    tiler.link(nvvidconv2)
    nvvidconv2.link(nvosd)
    nvosd.link(queue)
    #queue.link(nvvidconv2)
    queue.link(capsfilter)
    capsfilter.link(encoder)
    encoder.link(codeparser)
    codeparser.link(container)
    if is_aarch64():
        container.link(transform)
        transform.link(filesink)
    else:
        container.link(filesink)

    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)
    tiler_src_pad=tracker.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0)

    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)

if __name__ == '__main__':
    main()

terminal:

tosso@tosso:~/Documents/tosso_koctas$ python3 kayit.py 
Creating Pipeline 
 
Adding elements to Pipeline 

Creating source bin
Linking elements in the Pipeline 

Starting pipeline 

gstnvtracker: Loading low-level lib at /opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_nvmultiobjecttracker.so
~~ CLOG[/dvs/git/dirty/git-master_linux/deepstream/sdk/src/utils/nvmultiobjecttracker/include/modules/NvMultiObjectTracker/NvTrackerParams.hpp, getConfigRoot() @line 54]: [NvTrackerParams::getConfigRoot()] !!![WARNING] Invalid low-level config file caused an exception, but will go ahead with the default config values
gstnvtracker: Batch processing is ON
gstnvtracker: Past frame output is OFF
~~ CLOG[/dvs/git/dirty/git-master_linux/deepstream/sdk/src/utils/nvmultiobjecttracker/include/modules/NvMultiObjectTracker/NvTrackerParams.hpp, getConfigRoot() @line 54]: [NvTrackerParams::getConfigRoot()] !!![WARNING] Invalid low-level config file caused an exception, but will go ahead with the default config values
[NvMultiObjectTracker] Initialized
0:00:11.439073151 11180     0x226cd670 INFO                 nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1900> [UID = 1]: deserialized trt engine from :/home/tosso/Documents/tosso_koctas/utils/models/trt_engine/yolov4_cspdarknet_tiny_epoch_480.etlt_b1_gpu0_fp32.engine
INFO: [Implicit Engine Info]: layers num: 5
0   INPUT  kFLOAT Input           3x640x640       
1   OUTPUT kINT32 BatchedNMS      1               
2   OUTPUT kFLOAT BatchedNMS_1    200x4           
3   OUTPUT kFLOAT BatchedNMS_2    200             
4   OUTPUT kFLOAT BatchedNMS_3    200             

0:00:11.440479381 11180     0x226cd670 INFO                 nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2004> [UID = 1]: Use deserialized engine model: /home/tosso/Documents/tosso_koctas/utils/models/trt_engine/yolov4_cspdarknet_tiny_epoch_480.etlt_b1_gpu0_fp32.engine
0:00:11.506777456 11180     0x226cd670 INFO                 nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus:<primary-inference> [UID 1]: Load new model:utils/pgie_yolov4_tiny_config.txt sucessfully
Decodebin child added: source 

Decodebin child added: decodebin0 

Decodebin child added: qtdemux0 

Decodebin child added: multiqueue0 

Decodebin child added: h264parse0 

Decodebin child added: capsfilter0 

Decodebin child added: nvv4l2decoder0 

Opening in BLOCKING MODE 
NvMMLiteOpen : Block : BlockType = 261 
NVMEDIA: Reading vendor.tegra.display-size : status: 6 
NvMMLiteBlockCreate : Block : BlockType = 261 
In cb_newpad

gstname= video/x-raw
features= <Gst.CapsFeatures object at 0x7faade3f48 (GstCapsFeatures at 0x7f306e0fa0)>
*******FPS**************
Fps of stream 0 is  14.0
*******FPS**************
Fps of stream 0 is  11.0
*******FPS**************
Fps of stream 0 is  11.0
class_name oos
object_id 12
top: 155.77748107910156
left: 791.2494506835938
width: 456.8789978027344
height: 282.42193603515625
*******FPS**************
Fps of stream 0 is  11.0
*******FPS**************
Fps of stream 0 is  12.0
*******FPS**************
Fps of stream 0 is  11.0
*******FPS**************
Fps of stream 0 is  11.0
*******FPS**************
Fps of stream 0 is  11.0
*******FPS**************
Fps of stream 0 is  11.0
*******FPS**************
Fps of stream 0 is  11.0
*******FPS**************
Fps of stream 0 is  11.0
*******FPS**************
Fps of stream 0 is  11.0
*******FPS**************
Fps of stream 0 is  11.0
*******FPS**************
Fps of stream 0 is  11.0
*******FPS**************
Fps of stream 0 is  11.0
*******FPS**************
Fps of stream 0 is  11.0
class_name oos
object_id 64
top: 947.822021484375
left: 624.6297607421875
width: 114.32098388671875
height: 108.49394226074219

You can use this pipeline to save the file as a raw H.264 stream; a software encoder can't access GPU memory.

osd -> nvvideoconvert -> nvv4l2h264enc -> filesink
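
For reference, a minimal sketch of that recording branch in the Python API could look like the following (untested here; the variable and element names with the "-record" suffix are illustrative, and the output is a raw H.264 elementary stream rather than an .mp4):

# Recording branch: nvosd -> nvvideoconvert -> nvv4l2h264enc -> filesink
nvvidconv_rec = Gst.ElementFactory.make("nvvideoconvert", "convertor-record")
encoder_rec = Gst.ElementFactory.make("nvv4l2h264enc", "encoder-record")
filesink_rec = Gst.ElementFactory.make("filesink", "filesink-record")
filesink_rec.set_property("location", "./out.h264")  # raw H.264, no container
filesink_rec.set_property("sync", 0)

for elem in (nvvidconv_rec, encoder_rec, filesink_rec):
    pipeline.add(elem)

# Upstream elements (source bin, streammux, pgie, tracker, nvosd) stay as in
# the original script; only the tail of the pipeline changes.
nvosd.link(nvvidconv_rec)
nvvidconv_rec.link(encoder_rec)
encoder_rec.link(filesink_rec)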

I did what you mentioned above with this code:

import sys
sys.path.append('../')
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from utils.common.is_aarch_64 import is_aarch64
from utils.common.bus_call import bus_call
from utils.common.FPS import GETFPS
import pyds

fps_streams={}
sended=[]
MAX_DISPLAY_LEN = 64
MUXER_OUTPUT_WIDTH = 1280
MUXER_OUTPUT_HEIGHT = 720
MUXER_BATCH_TIMEOUT_USEC = 3400000
TILED_OUTPUT_WIDTH = 1280
TILED_OUTPUT_HEIGHT = 720
GST_CAPS_FEATURES_NVMM = "memory:NVMM"
def tiler_src_pad_buffer_probe(pad,info,u_data):

    gst_buffer = info.get_buffer()
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)

            l_obj=frame_meta.obj_meta_list
            while l_obj: 
                try: 
                    obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
                except StopIteration:
                    break
                l_tracker_object_coordinates = obj_meta.tracker_bbox_info.org_bbox_coords
                if obj_meta.obj_label == "oos" and obj_meta.object_id not in sended:
                    print("class_name",obj_meta.obj_label)
                    print("object_id",obj_meta.object_id)
                    print("top:",l_tracker_object_coordinates.top)
                    print("left:",l_tracker_object_coordinates.left)
                    print("width:",l_tracker_object_coordinates.width)
                    print("height:",l_tracker_object_coordinates.height)
                    sended.append(obj_meta.object_id)
                if len(sended) > 20:
                    del sended[0]
                try:
                    l_obj=l_obj.next
                except StopIteration:
                    break
            fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()

            l_frame=l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK    

def cb_newpad(decodebin, decoder_src_pad,data):
    print("In cb_newpad\n")
    caps=decoder_src_pad.get_current_caps()
    gststruct=caps.get_structure(0)
    gstname=gststruct.get_name()
    source_bin=data
    features=caps.get_features(0)
    print("gstname=",gstname)
    if(gstname.find("video")!=-1):
        print("features=",features)
        if features.contains("memory:NVMM"):
            bin_ghost_pad=source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")

def decodebin_child_added(child_proxy,Object,name,user_data):
    print("Decodebin child added:", name, "\n")
    if(name.find("decodebin") != -1):
        Object.connect("child-added",decodebin_child_added,user_data)
    # Object.set_property("drop-frame-interval", 0)

def create_source_bin(index,uri):
    print("Creating source bin")
    bin_name="source-bin-%02d" %index
    nbin=Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")
    uri_decode_bin=Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    uri_decode_bin.set_property("uri",uri)
    uri_decode_bin.connect("pad-added",cb_newpad,nbin)
    uri_decode_bin.connect("child-added",decodebin_child_added,nbin)
    Gst.Bin.add(nbin,uri_decode_bin)
    bin_pad=nbin.add_pad(Gst.GhostPad.new_no_target("src",Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin

def main():
    GObject.threads_init()
    Gst.init(None)
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    tiler=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    
    
    nvvidconv2 = Gst.ElementFactory.make("nvvideoconvert", "convertor2")
    nvvidconv3 = Gst.ElementFactory.make("nvvideoconvert", "convertor3")
    capsfilter = Gst.ElementFactory.make("capsfilter", "capsfilter")
    nvencoder = Gst.ElementFactory.make("nvv4l2h264enc", "nvencoder")


    filesink = Gst.ElementFactory.make("filesink", "filesink")
    if(is_aarch64()):
        transform=Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    
    caps = Gst.Caps.from_string("video/x-raw, format=I420")
    capsfilter.set_property("caps", caps)
    
    
    nvosd.set_property('process-mode',0)
    nvosd.set_property('display-text',1)

    filesink.set_property("location", "./out.mp4")
    filesink.set_property('sync', 0)
    sink.set_property('sync', 0)
    
    tracker.set_property('tracker-width', 640)
    tracker.set_property('tracker-height', 384)
    tracker.set_property('gpu_id', 0)
    tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_nvmultiobjecttracker.so')
    tracker.set_property('ll-config-file', 'trackers/config_tracker_IOU.yml')
    tracker.set_property('enable_batch_process', 1)
    tracker.set_property('enable_past_frame', 0)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 400000)
    streammux.set_property('sync-inputs',0)
    
    pgie.set_property('config-file-path', "utils/pgie_yolov4_tiny_config.txt")  
    #pgie.set_property('config-file-path', "utils/yolotiny_infer_primary.txt")  
    pgie.set_property('batch-size',1)

    tiler.set_property("rows",1)
    tiler.set_property("columns",1)
    tiler.set_property("width", 720)
    tiler.set_property("height", 480)

    print("Adding elements to Pipeline \n")

    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv2)
    pipeline.add(nvvidconv3)
    pipeline.add(capsfilter)
    pipeline.add(nvencoder)

    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(filesink)

    uri_name="file:///home/tosso/Documents/tosso_koctas/utils/videos/reyon_corp.mp4"
    if uri_name.find("rtsp://") == 0 :
        streammux.set_property('live-source', 0)
    fps_streams["stream{0}".format(0)]=GETFPS(0)
    source_bin=create_source_bin(0, uri_name)
    pipeline.add(source_bin)
    padname="sink_%u" %0
    sinkpad= streammux.get_request_pad(padname)
    srcpad=source_bin.get_static_pad("src")
    srcpad.link(sinkpad)


    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(tiler)
    tiler.link(nvvidconv2)
    nvvidconv2.link(nvosd)
    nvosd.link(nvvidconv3)
    nvvidconv3.link(nvencoder)
    if is_aarch64():
        transform.link(filesink)
    else:
        nvencoder.link(filesink)

    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)
    tiler_src_pad=tracker.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0)

    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)

if __name__ == '__main__':
    main()

The file is created, but it is still empty.
Here is the terminal output:

Creating Pipeline 
 
Adding elements to Pipeline 

Creating source bin
Linking elements in the Pipeline 

Starting pipeline 

Opening in BLOCKING MODE 
gstnvtracker: Loading low-level lib at /opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_nvmultiobjecttracker.so
~~ CLOG[/dvs/git/dirty/git-master_linux/deepstream/sdk/src/utils/nvmultiobjecttracker/include/modules/NvMultiObjectTracker/NvTrackerParams.hpp, getConfigRoot() @line 54]: [NvTrackerParams::getConfigRoot()] !!![WARNING] Invalid low-level config file caused an exception, but will go ahead with the default config values
gstnvtracker: Batch processing is ON
gstnvtracker: Past frame output is OFF
~~ CLOG[/dvs/git/dirty/git-master_linux/deepstream/sdk/src/utils/nvmultiobjecttracker/include/modules/NvMultiObjectTracker/NvTrackerParams.hpp, getConfigRoot() @line 54]: [NvTrackerParams::getConfigRoot()] !!![WARNING] Invalid low-level config file caused an exception, but will go ahead with the default config values
[NvMultiObjectTracker] Initialized
0:00:07.113667643 10372     0x13eed760 INFO                 nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1900> [UID = 1]: deserialized trt engine from :/home/tosso/Documents/tosso_koctas/utils/models/trt_engine/yolov4_cspdarknet_tiny_epoch_480.etlt_b1_gpu0_fp32.engine
INFO: [Implicit Engine Info]: layers num: 5
0   INPUT  kFLOAT Input           3x640x640       
1   OUTPUT kINT32 BatchedNMS      1               
2   OUTPUT kFLOAT BatchedNMS_1    200x4           
3   OUTPUT kFLOAT BatchedNMS_2    200             
4   OUTPUT kFLOAT BatchedNMS_3    200             

0:00:07.114866264 10372     0x13eed760 INFO                 nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2004> [UID = 1]: Use deserialized engine model: /home/tosso/Documents/tosso_koctas/utils/models/trt_engine/yolov4_cspdarknet_tiny_epoch_480.etlt_b1_gpu0_fp32.engine
0:00:07.162544330 10372     0x13eed760 INFO                 nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus:<primary-inference> [UID 1]: Load new model:utils/pgie_yolov4_tiny_config.txt sucessfully
Decodebin child added: source 

Decodebin child added: decodebin0 

Decodebin child added: qtdemux0 

Decodebin child added: multiqueue0 

Decodebin child added: h264parse0 

Decodebin child added: capsfilter0 

Decodebin child added: nvv4l2decoder0 

Opening in BLOCKING MODE 
NvMMLiteOpen : Block : BlockType = 261 
NVMEDIA: Reading vendor.tegra.display-size : status: 6 
NvMMLiteBlockCreate : Block : BlockType = 261 
In cb_newpad

gstname= video/x-raw
features= <Gst.CapsFeatures object at 0x7f879c0fa8 (GstCapsFeatures at 0x7f18042d20)>
NvMMLiteOpen : Block : BlockType = 4 
===== NVMEDIA: NVENC =====
NvMMLiteBlockCreate : Block : BlockType = 4 
H264: Profile = 66, Level = 0 
NVMEDIA_ENC: bBlitMode is set to TRUE 
*******FPS**************
Fps of stream 0 is  14.0
*******FPS**************
Fps of stream 0 is  11.0
*******FPS**************
Fps of stream 0 is  12.0
class_name oos
object_id 12
top: 155.77748107910156
left: 791.2494506835938
width: 456.8789978027344
height: 282.42193603515625
*******FPS**************
Fps of stream 0 is  11.0
*******FPS**************
Fps of stream 0 is  11.0

You should use video/x-raw(memory:NVMM),format=NV12.

I changed it to: caps = Gst.Caps.from_string("video/x-raw(memory:NVMM),format=NV12")
The problem is still the same.

In addition to the above modifications, the pipeline needs to be modified as follows.

I have tried it; it works normally.

tiler.link(nvvidconv2)
nvvidconv2.link(nvosd)
nvosd.link(nvvidconv3)
nvvidconv3.link(capsfilter)
capsfilter.link(nvencoder)
nvencoder.link(filesink)
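
Putting this link order together with the NV12/NVMM caps suggested earlier, the recording tail of the second script would look roughly like the sketch below (element names are taken from the code above; out.h264 is an assumed file name, since with no muxer the output is a raw elementary stream):

caps = Gst.Caps.from_string("video/x-raw(memory:NVMM),format=NV12")
capsfilter.set_property("caps", caps)

filesink.set_property("location", "./out.h264")  # raw H.264 elementary stream
filesink.set_property("sync", 0)

tiler.link(nvvidconv2)
nvvidconv2.link(nvosd)
nvosd.link(nvvidconv3)
nvvidconv3.link(capsfilter)
capsfilter.link(nvencoder)
nvencoder.link(filesink)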

I already do this in the code, but the video is not recorded; only the video file is created.

There has been no update from you for a while, so we are assuming this is no longer an issue and are closing this topic. If you need further support, please open a new one. Thanks

You can try removing the tracker element from the pipeline first.

If you have only one stream, the tiler element is not necessary.

Make sure the pipeline is running normally.
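
As a debugging step along those lines, the linking could be reduced to a single-stream pipeline with no tracker and no tiler (a sketch only, reusing the element names from the scripts above):

# Tracker and tiler removed for a single stream; everything else unchanged.
# Note: the buffer probe would then have to be attached to another pad,
# e.g. pgie's src pad, since tracker no longer exists in the pipeline.
streammux.link(pgie)
pgie.link(nvvidconv2)
nvvidconv2.link(nvosd)
nvosd.link(nvvidconv3)
nvvidconv3.link(capsfilter)
capsfilter.link(nvencoder)
nvencoder.link(filesink)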

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.