I need WebM format video in deepstream code

I refer this link: Why input audio not generating in output video - #5 by user7471
I used muxmp4.py for create mp4 format. AM getting also… but i need webm format also.

The audio is getting correctly in output.

Is it possible to save the output video with audio in two different video format(mp4 and WebM) ?

I tried, but facing some errors.
Please help me to correct it.

This is the main() function I modified.

def main(args):
    # Check input arguments
    if len(args) != 3:
        sys.stderr.write("usage: %s <media file or uri> <output mp4>\n" % args[0])
        sys.exit(1)

    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")
    uridecodebin = Gst.ElementFactory.make("uridecodebin", "uridecodebin")
    if not uridecodebin:
        sys.stderr.write(" Unable to create uridecodebin \n")
    uridecodebin.set_property('uri', args[1])
    uridecodebin.connect("pad-added", pad_added_callback, pipeline)

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    capsfilter = Gst.ElementFactory.make("capsfilter", "capsfilter")
    caps = Gst.Caps.from_string("video/x-raw(memory:NVMM),format=NV12")
    capsfilter.set_property("caps", caps)

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create nvv4l2h264enc
    nvv4l2h264enc = Gst.ElementFactory.make("nvv4l2h264enc", "nvv4l2h264enc")
    if not nvv4l2h264enc:
        sys.stderr.write(" Unable to create nvv4l2h264enc \n")

    # Create nvv4l2h264enc
    h264parse = Gst.ElementFactory.make("h264parse", "h264parse")
    if not h264parse:
        sys.stderr.write(" Unable to create h264parse \n")

    if os.environ.get('USE_NEW_NVSTREAMMUX') != 'yes': # Only set these properties if not using new gst-nvstreammux
        streammux.set_property('width', 1920)
        streammux.set_property('height', 1080)
        streammux.set_property('batched-push-timeout', 4000000)

    streammux.set_property('batch-size', 1)
    pgie.set_property('config-file-path', "dstest1_pgie_config.txt")

    queue = Gst.ElementFactory.make("queue", "audio-queue")
    if not queue:
        sys.stderr.write(" Unable to create audio-queue \n")

    # Create audioconvert
    audioconvert = Gst.ElementFactory.make("audioconvert", "audioconvert")
    if not audioconvert:
        sys.stderr.write(" Unable to create audioconvert \n")

    # Create avenc_aac
    avenc_aac = Gst.ElementFactory.make("avenc_aac", "avenc_aac")
    if not avenc_aac:
        sys.stderr.write(" Unable to create avenc_aac \n")

    # Create mp4mux
    mp4mux = Gst.ElementFactory.make("mp4mux", "mp4mux")
    if not mp4mux:
        sys.stderr.write(" Unable to create mp4mux \n")

    # Create webmmux
    webmmux = Gst.ElementFactory.make("webmmux", "webmmux")
    if not webmmux:
        sys.stderr.write(" Unable to create webmmux \n")

    # Create vp8enc
    vp8enc = Gst.ElementFactory.make("vp8enc", "vp8enc")
    if not vp8enc:
        sys.stderr.write(" Unable to create vp8enc \n")

    # Set properties for webmmux
    webmmux.set_property('name', 'webmmux')
    webmmux.set_property('streamable', True)

    # Set properties for vp8enc
    vp8enc.set_property('deadline', 1)
    vp8enc.set_property('threads', 2)

    print("Creating filesink \n")
    sink = Gst.ElementFactory.make("filesink", "filesink")
    if not sink:
        sys.stderr.write(" Unable to create filesink \n")
    print("Playing file %s " %args[1])
    sink.set_property('location', args[2])

    # Set location for WebM file
    sink_webm = Gst.ElementFactory.make("filesink", "filesink_webm")
    sink_webm.set_property('location', args[2] + ".webm")

    print("Adding elements to Pipeline \n")
    pipeline.add(uridecodebin)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvosd)
    pipeline.add(capsfilter)
    pipeline.add(nvvidconv)
    pipeline.add(nvv4l2h264enc)
    pipeline.add(h264parse)

    pipeline.add(queue)
    pipeline.add(audioconvert)
    pipeline.add(avenc_aac)

    pipeline.add(mp4mux)
    pipeline.add(webmmux)
    pipeline.add(vp8enc)
    pipeline.add(sink)
    pipeline.add(sink_webm)

    # video pipeline
    # streammux --> pgie --> nvosd --> nvvidconv
    # --> capsfilter --> nvv4l2h264enc --> h264parse
    streammux.link(pgie)
    pgie.link(nvosd)
    nvosd.link(nvvidconv)
    nvvidconv.link(capsfilter)
    capsfilter.link(nvv4l2h264enc)
    nvv4l2h264enc.link(h264parse)

    # audio pipeline
    # audioconvert -> avenc_aac
    queue.link(audioconvert)
    audioconvert.link(avenc_aac)
    # mux audio/video to mp4
    mp4mux.link(sink)

    # Link elements for WebM encoding
    nvvidconv.link(vp8enc)
    vp8enc.link(webmmux)
    webmmux.link(sink_webm)

    mp4mux_video_sink_pad = mp4mux.get_request_pad("video_0")
    h264parse.get_static_pad("src").link(mp4mux_video_sink_pad)

    # Add probe for WebM encoding
    webmmux_video_sink_pad = webmmux.get_request_pad("video_0")
    h264parse.get_static_pad("src").link(webmmux_video_sink_pad)

    # Lets add probe to get informed of the meta data generated, we add probe to
    # the sink pad of the osd element, since by that time, the buffer would have
    # had got all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # create an event loop and feed gstreamer bus mesages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)

    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        # send eos to pipeline
        print("send eos ....")
        pipeline.send_event(Gst.Event.new_eos())
        print("Waiting for the EOS message on the bus")
        bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.EOS)
        loop.quit()
    except:
        pass
    # cleanup
    print("set_state null")
    pipeline.set_state(Gst.State.NULL)

muxwebmandmp4.py (14.9 KB)

Put it to deepstream-test1

python3 muxwebmandmp4.py file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.mp4 out.mp4 out.webm

It is no different from mp4muxer, and this is not a problem with deepstream

Is it possible to implement in deepstream_imagedata-multistream.py

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.