Multiple output resolutions Gstreamer

I want to get multiple-resolution output from GStreamer: one stream at the original resolution and a second one resized. I tried a pipeline as below:

[pipeline graph image]

But it didn’t work as expected: the two Gst buffers I get from the two appsinks have the same resolution (640x480x3). What did I do wrong?

I cannot answer from the graph above.
Could you post the complete command and its output?

gst-launch-1.0 -v  your_pipeline

I used the GStreamer Python bindings to construct this pipeline. Here is a piece of my code:
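
(The snippet assumes the usual PyGObject boilerplate has already run; a minimal sketch:)

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)  # initialize GStreamer before building the pipeline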

def on_rtspsrc_pad_added(self, rtspsrc, pad, rtph264depay):
    # Link the dynamically added rtspsrc pad to rtph264depay
    rtspsrc.link(rtph264depay)


def run(self):
    self.pipeline = Gst.Pipeline.new("receiver")

    # Create rtspsrc element
    self.source = Gst.ElementFactory.make("rtspsrc", None)
    self.source.set_property("latency", 0)
    self.source.set_property("drop-on-latency", "true")
    self.source.set_property("location", self.rtspLink)

    self.pipeline.add(self.source)

    # Create rtph264depay element
    self.rtph264depay = Gst.ElementFactory.make("rtph264depay", None)
    if not self.rtph264depay:
        print("Could not create rtph264depay element")
        self.stop.set()
    self.pipeline.add(self.rtph264depay)

    # Create h264parse element
    self.h264parse = Gst.ElementFactory.make("h264parse", None)
    if not self.h264parse:
        print("Could not create h264parse element")
        self.stop.set()
    self.pipeline.add(self.h264parse)

    # Create omxh264dec element
    self.omxh264dec = Gst.ElementFactory.make("omxh264dec", None)
    if not self.omxh264dec:
        print("Could not create omxh264dec element")
        self.stop.set()
    self.pipeline.add(self.omxh264dec)

    # Create nvvidconv element
    self.nvvidconv = Gst.ElementFactory.make("nvvidconv", None)
    if not self.nvvidconv:
         print("Could not create nvvidconv element")
         self.stop.set()
    self.pipeline.add(self.nvvidconv)

    # Create tee element
    self.tee = Gst.ElementFactory.make("tee", None)
    if not self.tee:
        print("Could not create tee element")
        self.stop.set()
    self.pipeline.add(self.tee)

    # Create queue element
    self.queue = Gst.ElementFactory.make("queue", None)
    if not self.queue:
        print("Could not create queue element")
        self.stop.set()
    self.pipeline.add(self.queue)

    # Create queue 2 element
    self.queue_2 = Gst.ElementFactory.make("queue", None)
    if not self.queue_2:
        print("Could not create queue_2 element")
        self.stop.set()
    self.pipeline.add(self.queue_2)

    # Create cap_filter element (check before using the element)
    self.cap_filter = Gst.ElementFactory.make("capsfilter", None)
    if not self.cap_filter:
        print("Could not create capsfilter element")
        self.stop.set()
    caps = Gst.Caps.from_string("video/x-raw, format=(string)BGRx, framerate=(fraction)20/1")
    self.cap_filter.set_property("caps", caps)
    self.pipeline.add(self.cap_filter)

    # Create cap_filter 2 element
    self.cap_filter_2 = Gst.ElementFactory.make("capsfilter", None)
    if not self.cap_filter_2:
        print("Could not create capsfilter 2 element")
        self.stop.set()
    caps_2 = Gst.Caps.from_string("video/x-raw, format=(string)BGRx, framerate=(fraction)20/1")
    self.cap_filter_2.set_property("caps", caps_2)
    self.pipeline.add(self.cap_filter_2)

    # Create videoconvert element
    self.videoconvert = Gst.ElementFactory.make("videoconvert", None)
    if not self.videoconvert:
        print("Could not create videoconvert element")
        self.stop.set()
    self.pipeline.add(self.videoconvert)

    # Create videoconvert 2 element
    self.videoconvert_2 = Gst.ElementFactory.make("videoconvert", None)
    if not self.videoconvert_2:
        print("Could not create videoconvert_2 element")
        self.stop.set()
    self.pipeline.add(self.videoconvert_2)

    # Create cap_filter 3 element
    self.cap_filter_3 = Gst.ElementFactory.make("capsfilter", None)
    if not self.cap_filter_3:
        print("Could not create capsfilter 3 element")
        self.stop.set()
    caps_3 = Gst.Caps.from_string("video/x-raw, format=(string)BGR")
    self.cap_filter_3.set_property("caps", caps_3)
    self.pipeline.add(self.cap_filter_3)

    # Create cap_filter 4 element
    self.cap_filter_4 = Gst.ElementFactory.make("capsfilter", None)
    if not self.cap_filter_4:
        print("Could not create capsfilter 4 element")
        self.stop.set()
    caps_4 = Gst.Caps.from_string("video/x-raw, width=(int)640, height=(int)480")
    self.cap_filter_4.set_property("caps", caps_4)
    self.pipeline.add(self.cap_filter_4)

    # Create appsink element
    # self.appsink = Gst.ElementFactory.make("xvimagesink", None)
    self.appsink = Gst.ElementFactory.make("appsink", None)
    if not self.appsink:
        print("Could not create appsink element")
        self.stop.set()
    self.appsink.set_property("sync", False)
    self.appsink.set_property("drop", True)
    self.appsink.set_property("emit-signals", True)
    self.pipeline.add(self.appsink)

    # Create appsink_2 element
    # self.appsink_2 = Gst.ElementFactory.make("xvimagesink", None)
    self.appsink_2 = Gst.ElementFactory.make("appsink", None)
    if not self.appsink_2:
        print("Could not create appsink_2 element")
        self.stop.set()
    self.appsink_2.set_property("sync", False)
    self.appsink_2.set_property("drop", True)
    self.appsink_2.set_property("emit-signals", True)
    self.pipeline.add(self.appsink_2)

    # Link all Gstreamer elements together
    self.rtph264depay.link(self.h264parse)
    self.h264parse.link(self.omxh264dec)
    self.omxh264dec.link(self.nvvidconv)
    self.nvvidconv.link(self.tee)

    # Tee 1
    self.tee.link(self.queue)
    self.queue.link(self.cap_filter)
    # self.tee.link(self.cap_filter)
    self.cap_filter.link(self.videoconvert)
    self.videoconvert.link(self.cap_filter_3)
    self.cap_filter_3.link(self.appsink)

    # Tee 2
    self.tee.link(self.queue_2)
    self.queue_2.link(self.cap_filter_2)
    # self.tee.link(self.cap_filter_2)
    self.cap_filter_2.link(self.videoconvert_2)
    self.videoconvert_2.link(self.cap_filter_4)
    self.cap_filter_4.link(self.appsink_2)

    self.source.connect("pad-added", self.on_rtspsrc_pad_added, self.rtph264depay)

    # Start playing
    Gst.debug_bin_to_dot_file(self.pipeline, Gst.DebugGraphDetails.ALL, "pipeline")
    ret = self.pipeline.set_state(Gst.State.PLAYING)
    if ret == Gst.StateChangeReturn.FAILURE:
        print("Unable to set the pipeline to the playing state.")
        self.stop.set()

    # Wait until error or EOS
    bus = self.pipeline.get_bus()

    while True:

        if self.stop.is_set():
            print('Stopping CAM Stream by main process')
            break

        message = bus.timed_pop_filtered(10000, Gst.MessageType.ANY)  # timeout is in nanoseconds

        if message:
            if message.type == Gst.MessageType.ERROR:
                err, debug = message.parse_error()
                print("Error received from element %s: %s" % (
                    message.src.get_name(), err))
                print("Debugging information: %s" % debug)
                break
            elif message.type == Gst.MessageType.EOS:
                print("End-Of-Stream reached.")
                break
            elif message.type == Gst.MessageType.STATE_CHANGED:
                if isinstance(message.src, Gst.Pipeline):
                    old_state, new_state, pending_state = message.parse_state_changed()
                    print("Pipeline state changed from %s to %s." %
                          (old_state.value_nick, new_state.value_nick))
            # else:
            #     print("Unexpected message received:", message)
            #     self.unexpected_cnt = self.unexpected_cnt + 1
            #     if self.unexpected_cnt == self.num_unexpected_tot:
            #         break


    print('Terminating gstreamer pipeline...')
    self.stop.set()
    self.pipeline.set_state(Gst.State.NULL)
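
For reference, frames are pulled from each appsink through the "new-sample" signal, along these lines (a minimal sketch; the actual callback is not shown above, and on_new_sample is a hypothetical name):

def on_new_sample(self, appsink):
    # Pull the sample and read the negotiated resolution from its caps
    sample = appsink.emit("pull-sample")
    if sample is None:
        return Gst.FlowReturn.ERROR
    structure = sample.get_caps().get_structure(0)
    width = structure.get_value("width")
    height = structure.get_value("height")
    print("%s frame: %dx%d" % (appsink.get_name(), width, height))
    return Gst.FlowReturn.OK

# connected after creating each appsink, e.g.:
# self.appsink.connect("new-sample", self.on_new_sample)
# self.appsink_2.connect("new-sample", self.on_new_sample)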

I’d guess the problem is with the 20 fps framerate. Would it work without it?
It seems this works:

gst-launch-1.0 -v rtspsrc location=rtspt://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_175k.mov ! application/x-rtp, media=video, encoding-name=H264 ! rtph264depay ! h264parse ! omxh264dec ! tee name=nvmm_video ! queue ! nvvidconv ! video/x-raw, format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink name=app1    nvmm_video. ! queue ! nvvidconv ! video/x-raw, format=BGRx, width=640, height=480 ! videoconvert !  video/x-raw, format=BGR ! appsink name=app2

…assuming your appsinks expect BGR format.
You would use another format such as YUY2 for xvimagesink instead:

gst-launch-1.0 -v rtspsrc location=rtspt://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_175k.mov ! application/x-rtp, media=video, encoding-name=H264 ! rtph264depay ! h264parse ! omxh264dec ! tee name=nvmm_video ! queue ! nvvidconv ! video/x-raw, format=BGRx ! videoconvert ! video/x-raw, format=YUY2 ! xvimagesink    nvmm_video. ! queue ! nvvidconv ! video/x-raw, format=BGRx, width=640, height=480 ! videoconvert ! video/x-raw, format=YUY2 ! xvimagesink

If you want a fixed 20 fps framerate, and the RTSP source may provide a different framerate, you would have to use the videorate plugin, but this may hurt performance, especially if the native framerate divided by 20 is not an integer.
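
In Python, the same two-branch topology can be built with Gst.parse_launch; here is a minimal sketch (callback wiring and error handling omitted). The key point is that each tee branch has its own nvvidconv carrying the size caps; videoconvert cannot rescale, so in your original graph the 640x480 caps most likely propagated upstream through the tee to the single nvvidconv, forcing both appsinks to the same resolution:

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

# nvvidconv sits after the tee, so each branch scales independently
pipeline = Gst.parse_launch(
    "rtspsrc location=rtspt://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_175k.mov "
    "! application/x-rtp,media=video,encoding-name=H264 "
    "! rtph264depay ! h264parse ! omxh264dec ! tee name=nvmm_video "
    "nvmm_video. ! queue ! nvvidconv ! video/x-raw,format=BGRx "
    "! videoconvert ! video/x-raw,format=BGR ! appsink name=app1 "
    "nvmm_video. ! queue ! nvvidconv "
    "! video/x-raw,format=BGRx,width=640,height=480 "
    "! videoconvert ! video/x-raw,format=BGR ! appsink name=app2")

for name in ("app1", "app2"):  # appsink names come from the pipeline string
    sink = pipeline.get_by_name(name)
    sink.set_property("emit-signals", True)
    sink.set_property("sync", False)
    # sink.connect("new-sample", ...)  # attach a callback as sketched earlier

pipeline.set_state(Gst.State.PLAYING)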

Many thanks for your support. I tested the second pipeline you gave and it worked, but I saw a latency problem, although I set latency=0 on the RTSP source and sync=0 on xvimagesink. I used a Jetson Nano for testing; is it too weak to handle this pipeline? Also, this pipeline sometimes crashed immediately after starting if it had been cancelled before. Was that a problem with the GStreamer cache?

I suspect the crash is related to omxh264dec when the input stream stalls with an excessively low latency setting.
You may try nvv4l2decoder instead of omxh264dec, which is being deprecated.
Note that you would have to remove h264parse in this case:

gst-launch-1.0 -v rtspsrc latency=300 location=rtspt://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_175k.mov ! application/x-rtp, media=video, encoding-name=H264 ! rtph264depay ! nvv4l2decoder ! tee name=nvmm_video ! queue ! nvvidconv ! video/x-raw, format=BGRx ! videoconvert ! video/x-raw, format=YUY2 ! xvimagesink    nvmm_video. ! queue ! nvvidconv ! video/x-raw, format=BGRx, width=640, height=480 ! videoconvert ! video/x-raw, format=YUY2 ! xvimagesink
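
Applied to element-based Python code like the snippet above, the swap is just a different factory name plus dropping the h264parse link (a sketch; self.nvv4l2decoder is a hypothetical attribute name):

# Replace omxh264dec (and h264parse) with nvv4l2decoder
self.nvv4l2decoder = Gst.ElementFactory.make("nvv4l2decoder", None)
if not self.nvv4l2decoder:
    print("Could not create nvv4l2decoder element")
    self.stop.set()
self.pipeline.add(self.nvv4l2decoder)

# Link the depayloader directly to the decoder: no h264parse in between.
# The decoder then feeds the tee, with per-branch nvvidconv elements as
# in the pipelines above.
self.rtph264depay.link(self.nvv4l2decoder)
self.nvv4l2decoder.link(self.tee)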

Another thing: when using nvv4l2decoder on the Jetson Nano, I always see this warning:

v4l2bufferpool gstv4l2bufferpool.c:1482:gst_v4l2_buffer_pool_dqbuf:<nvv4l2decoder0:pool:sink> v4l2 provided buffer that is too big for the memory it was writing into. v4l2 claims 1008 bytes used but memory is only 0B. This is probably a driver bug.

This is the reason I considered using omxh264dec instead of nvv4l2decoder. My current JetPack version is 4.3.

Sorry, I missed your reply; I got no notification.
This warning may be harmless, and it can be removed according to:

You may also have a look at this:


Hi,
The print is harmless. To remove it, you may download the gst-v4l2 source code, then rebuild and replace

/usr/lib/aarch64-linux-gnu/gstreamer-1.0/libgstnvvideo4linux2.so

Thanks, Honey_Patouceul, for the prompt information.