Decrease FPS and see it on my display

Please provide complete information as applicable to your setup.

• **Hardware Platform**: Jetson Nano 2GB
• **DeepStream Version**: 6.0.1

Hi, my problem is: I want to decrease the FPS of my inference, but when I try it with

```python
caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420,framerate=10/1"))
```

I can't see the results on my screen, and I get this error:

```
0:01:29.754772650 11648 0x3722a540 WARN nvinfer gstnvinfer.cpp:2288:gst_nvinfer_output_loop: error: streaming stopped, reason not-negotiated (-4)
Error: gst-stream-error-quark: Internal data stream error. (1): /dvs/git/dirty/git-master_linux/deepstream/sdk/src/gst-plugins/gst-nvinfer/gstnvinfer.cpp(2288): gst_nvinfer_output_loop (): /GstPipeline:pipeline0/GstNvInfer:primary-inference:
streaming stopped, reason not-negotiated (-4)
Exiting app.
```

When I run it without `framerate=10/1` I don't have any problem, but I need to reduce the FPS and still see the inference on my screen (see the note after the code below).

My code is:

```python
# Imports as used by the deepstream_python_apps reference samples; the
# original post starts at main(), so this header is an assumption.
import sys
import os
from os import path
import math
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GLib, GstRtspServer
import pyds
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.FPS import PERF_DATA

# uri_inputs, frame_count, saved_count, TILED_OUTPUT_WIDTH/HEIGHT and
# create_source_bin are defined elsewhere in the script (not shown here).

def main(args):
    number_sources = len(uri_inputs)
    global perf_data
    perf_data = PERF_DATA(number_sources)

    global folder_name
    folder_name = "out_crops"

    if path.exists(folder_name):
        sys.stderr.write("The output folder %s already exists. Please remove it first.\n" % folder_name)
        sys.exit(1)

    os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)
    # Standard GStreamer initialization
    Gst.init(None)

    # Create the Pipeline element that will hold and connect the other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streammux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)

    for i in range(number_sources):
        os.mkdir(folder_name + "/stream_" + str(i))
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = uri_inputs[i]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")

    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")

    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter; this framerate cap is what triggers the
    # not-negotiated error described above.
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420,framerate=10/1"))

    transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create nveglglessink \n")

    print("Playing file {} ".format(uri_inputs))

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "config_infer_primary_facedetectir.txt")
    pgie_batch_size = pgie.get_property("batch-size")

    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ",
              number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on the CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)
        nvvidconv_postosd.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(sink)
    pipeline.add(transform)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(transform)
    transform.link(sink)

    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start streaming
    rtsp_port_num = 8554

    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)

    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n" % rtsp_port_num)

    print("Starting pipeline \n")
    # Start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
```
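For context on the error above: none of the elements in this pipeline convert frame rate (`nvvideoconvert` handles format and resolution only), so a capsfilter demanding `framerate=10/1` from a source running at a different rate can never be satisfied, and negotiation fails. One way to make the cap negotiable is to put a `videorate` element in front of the capsfilter so frames can be dropped to match it. A minimal sketch, assuming the rest of the pipeline stays as posted; the element name `"rate"` and the `drop-only` setting are illustrative, not from the original post:

```python
# Sketch: let videorate drop frames so the framerate=10/1 cap can be met.
videorate = Gst.ElementFactory.make("videorate", "rate")
if not videorate:
    sys.stderr.write(" Unable to create videorate \n")
videorate.set_property("drop-only", True)  # only drop frames, never duplicate

pipeline.add(videorate)

# Re-link the tail of the pipeline with videorate before the capsfilter:
nvosd.link(nvvidconv_postosd)
nvvidconv_postosd.link(videorate)
videorate.link(caps)   # caps = "video/x-raw(memory:NVMM), format=I420,framerate=10/1"
caps.link(transform)
transform.link(sink)
```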

There has been no update from you for a while, so we are assuming this is no longer an issue and are closing this topic. If you need further support, please open a new one. Thanks.

Please refer to this topic: I want to infer 1 frame per second in deepstream-test5 app
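For reference, one common way to feed fewer frames through a DeepStream pipeline is the decoder's `drop-frame-interval` property, which makes `nvv4l2decoder` output only every Nth frame. A hedged sketch following the `decodebin_child_added` callback pattern used by the deepstream_python_apps samples; the callback wiring and the value 3 (assuming a 30 fps source, giving roughly 10 fps) are illustrative, not quoted from the linked topic:

```python
# Hedged sketch: configure the hardware decoder inside the source bin to
# emit only every Nth frame, as decodebin's children are created.
def decodebin_child_added(child_proxy, Object, name, user_data):
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)
    if name.find("nvv4l2decoder") != -1:
        # drop-frame-interval=N keeps one frame out of every N;
        # with a 30 fps source, N=3 yields roughly 10 fps downstream.
        Object.set_property("drop-frame-interval", 3)
```

Alternatively, if only the inference rate (not the display rate) needs to drop, the `interval` property of `nvinfer` skips inference on consecutive batches while still passing every frame through.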
