Why is the input audio not generated in the output video?

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU): GPU

I am doing face emotion detection.
The input video has audio, but once processing completes, the output video has no audio. Why is the audio not generated in the output?

I need the output to have the same audio as the input.

Please build the following pipeline.

             | --> nvstreammux --> pgie --> sgie --> nvosd --> nvvideoconvert --> nvv4l2h264enc --> h264parse --> mp4mux |
uridecodebin |                                                                                                           | --> filesink
             |  --> queue --> audioconvert --> avenc_aac----------------------------------------------------------> mp4mux

The following is an example.

gst-launch-1.0 -e uridecodebin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.mp4 name=dec \
    dec. ! queue ! mux.sink_0 \
    nvstreammux name=mux batch-size=1 width=1280 height=720 \
    ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test2/dstest2_pgie_config.txt \
    ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test2/dstest2_sgie1_config.txt \
    ! nvdsosd ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=NV12' \
    ! nvv4l2h264enc ! h264parse ! mp4.video_0 \
    mp4mux name=mp4 ! filesink location=out.mp4 \
    dec. ! queue ! audioconvert ! avenc_aac ! mp4.audio_0
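If it helps, the same pipeline description can be driven from Python via Gst.parse_launch before wiring the elements up by hand. Below is a minimal sketch, assuming a standard DeepStream install; note that inside Python the caps string needs no shell quoting, and we simply wait for EOS so mp4mux can write its trailer.

#!/usr/bin/env python3
# Sketch: run the gst-launch pipeline above from Python with Gst.parse_launch.
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

PIPELINE = (
    "uridecodebin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.mp4 name=dec "
    "dec. ! queue ! mux.sink_0 "
    "nvstreammux name=mux batch-size=1 width=1280 height=720 "
    "! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test2/dstest2_pgie_config.txt "
    "! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test2/dstest2_sgie1_config.txt "
    "! nvdsosd ! nvvideoconvert ! video/x-raw(memory:NVMM),format=NV12 "
    "! nvv4l2h264enc ! h264parse ! mp4.video_0 "
    "mp4mux name=mp4 ! filesink location=out.mp4 "
    "dec. ! queue ! audioconvert ! avenc_aac ! mp4.audio_0"
)

pipeline = Gst.parse_launch(PIPELINE)
pipeline.set_state(Gst.State.PLAYING)

# A file source emits EOS on its own; waiting for it lets mp4mux finalize the
# file (the -e flag only matters for gst-launch when you interrupt it).
bus = pipeline.get_bus()
bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE,
                       Gst.MessageType.EOS | Gst.MessageType.ERROR)
pipeline.set_state(Gst.State.NULL)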

I tried your sample pipeline, and it works.
But when I add the same pipeline to my code, it does not work. Below is my main code; can you check it?

def main(args):
    # Check input arguments
    if len(args) < 3:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN] <output_folder>\n" % args[0])
        sys.exit(1)

    global perf_data
    perf_data = PERF_DATA(len(args) - 2)
    number_sources = len(args) - 2

    global folder_name
    folder_name = args[-1]
    if os.path.exists(folder_name):
        sys.stderr.write("The output folder %s already exists. Please remove it first.\n" % folder_name)
        sys.exit(1)

    os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)

    # Standard GStreamer initialization
    Gst.init(None)

    # Create GStreamer elements
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write("Unable to create Pipeline\n")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write("Unable to create NvStreamMux\n")

    pipeline.add(streammux)

    audioconvert = Gst.ElementFactory.make("audioconvert", "audioconvert")
    if not audioconvert:
        sys.stderr.write("Unable to create audioconvert element\n")

    pipeline.add(audioconvert)

    for i in range(number_sources):
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        source_audio_bin = create_source_audio_bin(i, uri_name)

        if not source_bin or not source_audio_bin:
            sys.stderr.write("Unable to create source bin or source audio bin \n")

        pipeline.add(source_bin)
        pipeline.add(source_audio_bin)

        padname_audio = "sink_audio_%u" % i
        # NOTE: audioconvert has only a single static "sink" pad and no
        # request pads, so get_request_pad() below returns None. The
        # source_audio_bin is also never linked into this chain.
        sinkpad_audio = audioconvert.get_request_pad(padname_audio)
        print("Sinkpad audio:", sinkpad_audio)  # debug
        if not sinkpad_audio:
            sys.stderr.write("Unable to get audio sink pad\n")
        srcpad_audio = audioconvert.get_static_pad("src")
        print("Srcpad audio:", srcpad_audio)  # debug
        if not srcpad_audio:
            sys.stderr.write("Unable to get audio src pad\n")
        if srcpad_audio and sinkpad_audio:
            srcpad_audio.link(sinkpad_audio)
        else:
            sys.stderr.write("Error linking audio pads: srcpad_audio or sinkpad_audio is None\n")

    # Creating audio processing elements
    audio_queue = Gst.ElementFactory.make("queue", "audio-queue")
    if not audio_queue:
        sys.stderr.write("Unable to create audio queue\n")

    pipeline.add(audio_queue)

    avenc_aac = Gst.ElementFactory.make("avenc_aac", "avenc_aac")
    if not avenc_aac:
        sys.stderr.write("Unable to create avenc_aac\n")

    pipeline.add(avenc_aac)

    mp4mux_audio = Gst.ElementFactory.make("mp4mux", "mp4mux_audio")
    if not mp4mux_audio:
        sys.stderr.write("Unable to create mp4mux_audio\n")

    pipeline.add(mp4mux_audio)

    audio_sink = Gst.ElementFactory.make("filesink", "audio-sink")
    if not audio_sink:
        sys.stderr.write("Unable to create audio sink\n")

    audio_sink.set_property("location", os.path.join(folder_name, "Annotated_Audio.mp4"))
    pipeline.add(audio_sink)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write("Unable to create pgie\n")

    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write("Unable to create nvvidconv1\n")

    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write("Unable to get the caps filter1\n")
    filter1.set_property("caps", caps1)

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write("Unable to create tiler\n")

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write("Unable to create nvvidconv\n")

    print("Creating video encoding and output elements \n ")
    nvvidconv_out = Gst.ElementFactory.make("nvvideoconvert", "convertor_out")
    if not nvvidconv_out:
        sys.stderr.write("Unable to create nvvideoconvert\n")

    mp4mux = Gst.ElementFactory.make("mp4mux", "mp4mux")
    if not mp4mux:
        sys.stderr.write("Unable to create mp4mux\n")
        sys.exit(1)  # Exit the program if mp4mux creation fails

    pipeline.add(mp4mux)

    filesink = Gst.ElementFactory.make("filesink", "filesink")
    if not filesink:
        sys.stderr.write("Unable to create filesink\n")
        sys.exit(1)  # Exit the program if filesink creation fails

    filesink.set_property("location", os.path.join(folder_name, "Annotated_Video.mp4"))
    pipeline.add(filesink)

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "config_infer_primary_yoloV8_face.txt")

    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with the number of sources ",
              number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvvidconv_out)
    # mp4mux and filesink were already added to the pipeline above

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvvidconv_out)
    nvvidconv_out.link(mp4mux)
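    # NOTE: the line above links raw NVMM video straight into mp4mux. There is
    # no nvv4l2h264enc/h264parse stage in this chain, and mp4mux does not
    # accept uncompressed buffers, so negotiation fails at this point.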
    mp4mux.link(filesink)

    # create an event loop and feed GStreamer bus messages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write("Unable to get src pad \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)
        # perf callback function to print fps every 5 sec
        GLib.timeout_add(5000, perf_data.perf_print_callback)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args[:-1]):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)

How do I add this pipeline to my code?
I'm trying to add it, but I'm confused about how the elements should be arranged and linked.
Please help me correct it.

This is my main function

def main(args):
    # Check input arguments
    if len(args) < 3:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN] <folder to save frames>\n" % args[0])
        sys.exit(1)

    global perf_data
    perf_data = PERF_DATA(len(args) - 2)
    number_sources = len(args) - 2

    global folder_name
    folder_name = args[-1]
    if os.path.exists(folder_name):
        sys.stderr.write("The output folder %s already exists. Please remove it first.\n" % folder_name)
        sys.exit(1)

    os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)
    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create the audio processing branch
    queue_audio = Gst.ElementFactory.make("queue", "queue_audio")

    # Create the audio processing elements
    audioconvert = Gst.ElementFactory.make("audioconvert", "audioconvert")
    avenc_aac = Gst.ElementFactory.make("avenc_aac", "avenc_aac")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
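        # NOTE: pgie is only created after this loop, so the next two lines
        # reference an undefined name; nvinfer also has no "sink_audio"
        # request pad.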
        queue_audio_pad = queue_audio.get_static_pad("src")
        queue_audio_pad.link(pgie.get_request_pad("sink_audio"))

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
  
    print("Creating video encoding and output elements \n ")
    nvvidconv_out = Gst.ElementFactory.make("nvvideoconvert", "convertor_out")
    if not nvvidconv_out:
        sys.stderr.write(" Unable to create nvvideoconvert \n")

    nvh264enc = Gst.ElementFactory.make("nvv4l2h264enc", "nvvideo-h264enc")
    if not nvh264enc:
        sys.stderr.write(" Unable to create nvv4l2h264enc \n")

    h264parse1 = Gst.ElementFactory.make("h264parse", "h264parse1")
    if not h264parse1:
        sys.stderr.write(" Unable to create h264parse \n")

    qtmux = Gst.ElementFactory.make("qtmux", "qtmux")
    if not qtmux:
        sys.stderr.write(" Unable to create qtmux \n")

    filesink = Gst.ElementFactory.make("filesink", "filesink")
    if not filesink:
        sys.stderr.write(" Unable to create filesink \n")
    output_file_path = os.path.join(folder_name, "Annotated_Video.mp4")
    filesink.set_property("location", output_file_path)

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "config_infer_primary_yoloV8_face.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ",
              number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")
    pipeline.add(queue_audio)
    pipeline.add(audioconvert)
    pipeline.add(avenc_aac)
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvvidconv_out)
    pipeline.add(nvh264enc)
    pipeline.add(h264parse1)
    pipeline.add(qtmux)
    pipeline.add(filesink)

    print("Linking elements in the Pipeline \n")
    queue_audio.link(audioconvert)
    audioconvert.link(avenc_aac)
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvvidconv_out)
    nvvidconv_out.link(nvh264enc)
    nvh264enc.link(h264parse1)
    h264parse1.link(qtmux)
    qtmux.link(filesink)

    # create an event loop and feed gstreamer bus mesages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)
        # perf callback function to print fps every 5 sec
        GLib.timeout_add(5000, perf_data.perf_print_callback)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args[:-1]):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)

This is just a GStreamer issue, and we don't normally debug code for users.

You can refer to this example.

muxmp4.py (12.0 KB)

Put the code in the deepstream-test1 directory and run it

python3 muxmp4.py file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.mp4 out.mp4
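
For readers who cannot download the attachment: the contents of muxmp4.py are not reproduced here, so treat the following as a sketch rather than the attached file. The essential technique such an example uses is a pad-added handler that routes uridecodebin's dynamic pads by caps into a video branch and an audio branch, both ending on request pads of the same mp4mux. Inference elements are omitted for brevity.

#!/usr/bin/env python3
# Sketch: mux processed video and the original audio into one MP4.
import sys
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GLib

def on_pad_added(decodebin, pad, targets):
    # Route uridecodebin's dynamic pads by caps: video/* to the video queue,
    # audio/* to the audio queue.
    video_queue, audio_queue = targets
    caps = pad.get_current_caps() or pad.query_caps(None)
    name = caps.get_structure(0).get_name()
    if name.startswith("video/"):
        pad.link(video_queue.get_static_pad("sink"))
    elif name.startswith("audio/"):
        pad.link(audio_queue.get_static_pad("sink"))

def bus_call(bus, message, loop):
    # Quit on EOS or error; seeing EOS means mp4mux has finalized the file.
    if message.type == Gst.MessageType.ERROR:
        err, dbg = message.parse_error()
        sys.stderr.write("ERROR: %s\n" % err)
        loop.quit()
    elif message.type == Gst.MessageType.EOS:
        loop.quit()

def main(uri, out_path):
    Gst.init(None)
    pipeline = Gst.Pipeline()

    dec = Gst.ElementFactory.make("uridecodebin", "dec")
    dec.set_property("uri", uri)

    # Video branch: queue -> nvvideoconvert -> nvv4l2h264enc -> h264parse
    vqueue = Gst.ElementFactory.make("queue", "vqueue")
    vconv = Gst.ElementFactory.make("nvvideoconvert", "vconv")
    venc = Gst.ElementFactory.make("nvv4l2h264enc", "venc")
    vparse = Gst.ElementFactory.make("h264parse", "vparse")

    # Audio branch: queue -> audioconvert -> avenc_aac
    aqueue = Gst.ElementFactory.make("queue", "aqueue")
    aconv = Gst.ElementFactory.make("audioconvert", "aconv")
    aenc = Gst.ElementFactory.make("avenc_aac", "aenc")

    mux = Gst.ElementFactory.make("mp4mux", "mux")
    sink = Gst.ElementFactory.make("filesink", "sink")
    sink.set_property("location", out_path)

    for e in (dec, vqueue, vconv, venc, vparse, aqueue, aconv, aenc, mux, sink):
        pipeline.add(e)

    vqueue.link(vconv)
    vconv.link(venc)
    venc.link(vparse)
    aqueue.link(aconv)
    aconv.link(aenc)
    mux.link(sink)

    # Both branches end on request pads of the SAME mp4mux instance.
    vparse.get_static_pad("src").link(mux.get_request_pad("video_0"))
    aenc.get_static_pad("src").link(mux.get_request_pad("audio_0"))

    dec.connect("pad-added", on_pad_added, (vqueue, aqueue))

    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    finally:
        pipeline.set_state(Gst.State.NULL)

if __name__ == "__main__":
    main(sys.argv[1], sys.argv[2])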

Thanks,
the audio is now coming through correctly in the output.

Is it possible to save the output video with audio in two different formats (MP4 and WebM)?

I tried, but I am facing some errors.
Please help me correct it.

This is the main() function I modified.

def main(args):
    # Check input arguments
    if len(args) != 3:
        sys.stderr.write("usage: %s <media file or uri> <output mp4>\n" % args[0])
        sys.exit(1)

    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")
    uridecodebin = Gst.ElementFactory.make("uridecodebin", "uridecodebin")
    if not uridecodebin:
        sys.stderr.write(" Unable to create uridecodebin \n")
    uridecodebin.set_property('uri', args[1])
    uridecodebin.connect("pad-added", pad_added_callback, pipeline)

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    capsfilter = Gst.ElementFactory.make("capsfilter", "capsfilter")
    caps = Gst.Caps.from_string("video/x-raw(memory:NVMM),format=NV12")
    capsfilter.set_property("caps", caps)

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create nvv4l2h264enc
    nvv4l2h264enc = Gst.ElementFactory.make("nvv4l2h264enc", "nvv4l2h264enc")
    if not nvv4l2h264enc:
        sys.stderr.write(" Unable to create nvv4l2h264enc \n")

    # Create h264parse
    h264parse = Gst.ElementFactory.make("h264parse", "h264parse")
    if not h264parse:
        sys.stderr.write(" Unable to create h264parse \n")

    if os.environ.get('USE_NEW_NVSTREAMMUX') != 'yes': # Only set these properties if not using new gst-nvstreammux
        streammux.set_property('width', 1920)
        streammux.set_property('height', 1080)
        streammux.set_property('batched-push-timeout', 4000000)

    streammux.set_property('batch-size', 1)
    pgie.set_property('config-file-path', "dstest1_pgie_config.txt")

    queue = Gst.ElementFactory.make("queue", "audio-queue")
    if not queue:
        sys.stderr.write(" Unable to create audio-queue \n")

    # Create audioconvert
    audioconvert = Gst.ElementFactory.make("audioconvert", "audioconvert")
    if not audioconvert:
        sys.stderr.write(" Unable to create audioconvert \n")

    # Create avenc_aac
    avenc_aac = Gst.ElementFactory.make("avenc_aac", "avenc_aac")
    if not avenc_aac:
        sys.stderr.write(" Unable to create avenc_aac \n")

    # Create mp4mux
    mp4mux = Gst.ElementFactory.make("mp4mux", "mp4mux")
    if not mp4mux:
        sys.stderr.write(" Unable to create mp4mux \n")

    # Create webmmux
    webmmux = Gst.ElementFactory.make("webmmux", "webmmux")
    if not webmmux:
        sys.stderr.write(" Unable to create webmmux \n")

    # Create vp8enc
    vp8enc = Gst.ElementFactory.make("vp8enc", "vp8enc")
    if not vp8enc:
        sys.stderr.write(" Unable to create vp8enc \n")

    # Set properties for webmmux
    webmmux.set_property('name', 'webmmux')
    webmmux.set_property('streamable', True)

    # Set properties for vp8enc
    vp8enc.set_property('deadline', 1)
    vp8enc.set_property('threads', 2)

    print("Creating filesink \n")
    sink = Gst.ElementFactory.make("filesink", "filesink")
    if not sink:
        sys.stderr.write(" Unable to create filesink \n")
    print("Playing file %s " %args[1])
    sink.set_property('location', args[2])

    # Set location for WebM file
    sink_webm = Gst.ElementFactory.make("filesink", "filesink_webm")
    sink_webm.set_property('location', args[2] + ".webm")

    print("Adding elements to Pipeline \n")
    pipeline.add(uridecodebin)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvosd)
    pipeline.add(capsfilter)
    pipeline.add(nvvidconv)
    pipeline.add(nvv4l2h264enc)
    pipeline.add(h264parse)

    pipeline.add(queue)
    pipeline.add(audioconvert)
    pipeline.add(avenc_aac)

    pipeline.add(mp4mux)
    pipeline.add(webmmux)
    pipeline.add(vp8enc)
    pipeline.add(sink)
    pipeline.add(sink_webm)

    # video pipeline
    # streammux --> pgie --> nvosd --> nvvidconv
    # --> capsfilter --> nvv4l2h264enc --> h264parse
    streammux.link(pgie)
    pgie.link(nvosd)
    nvosd.link(nvvidconv)
    nvvidconv.link(capsfilter)
    capsfilter.link(nvv4l2h264enc)
    nvv4l2h264enc.link(h264parse)

    # audio pipeline
    # audioconvert -> avenc_aac
    queue.link(audioconvert)
    audioconvert.link(avenc_aac)
    # mux audio/video to mp4
    mp4mux.link(sink)

    # Link elements for WebM encoding
    nvvidconv.link(vp8enc)
    vp8enc.link(webmmux)
    webmmux.link(sink_webm)
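    # NOTE: nvvidconv's src pad is already linked to capsfilter above, and a
    # src pad can only be linked once, so nvvidconv.link(vp8enc) fails here.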

    mp4mux_video_sink_pad = mp4mux.get_request_pad("video_0")
    h264parse.get_static_pad("src").link(mp4mux_video_sink_pad)

    # Add probe for WebM encoding
    webmmux_video_sink_pad = webmmux.get_request_pad("video_0")
    h264parse.get_static_pad("src").link(webmmux_video_sink_pad)

    # Lets add probe to get informed of the meta data generated, we add probe to
    # the sink pad of the osd element, since by that time, the buffer would have
    # had got all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # create an event loop and feed gstreamer bus mesages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        # send eos to pipeline
        print("send eos ....")
        pipeline.send_event(Gst.Event.new_eos())
        print("Waiting for the EOS message on the bus")
        bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.EOS)
        loop.quit()
    except:
        pass
    # cleanup
    print("set_state null")
    pipeline.set_state(Gst.State.NULL)

If you have an issue with DeepStream, please open a new topic.
Your problem is only related to GStreamer.
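
For completeness, the usual GStreamer way to write one processed stream into two containers is a tee per stream, with each container getting its own encoders and muxer. WebM only accepts VP8/VP9/AV1 video and Vorbis/Opus audio, so the H.264/AAC data feeding mp4mux cannot be reused for webmmux; the raw streams have to be split before encoding. Below is a minimal sketch under those assumptions (inference omitted for brevity; vp8enc, vorbisenc and webmmux come from gst-plugins-good, and their availability on your install is an assumption):

#!/usr/bin/env python3
# Sketch: tee decoded video and audio so each container gets its own encoders.
# MP4 gets H.264 + AAC, WebM gets VP8 + Vorbis.
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

pipeline = Gst.parse_launch(
    "uridecodebin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.mp4 name=dec "
    # Split the decoded video into two branches.
    "dec. ! queue ! tee name=vt "
    # MP4 branch: keep NVMM memory and encode to H.264 on the GPU.
    "vt. ! queue ! nvvideoconvert ! video/x-raw(memory:NVMM),format=NV12 "
    "! nvv4l2h264enc ! h264parse ! m4.video_0 "
    # WebM branch: copy back to system memory for the software VP8 encoder.
    "vt. ! queue ! nvvideoconvert ! video/x-raw,format=I420 "
    "! vp8enc deadline=1 ! wm.video_0 "
    # Split the decoded audio the same way.
    "dec. ! queue ! audioconvert ! tee name=at "
    "at. ! queue ! audioconvert ! avenc_aac ! m4.audio_0 "
    "at. ! queue ! audioconvert ! audioresample ! vorbisenc ! wm.audio_0 "
    "mp4mux name=m4 ! filesink location=out.mp4 "
    "webmmux name=wm ! filesink location=out.webm"
)

pipeline.set_state(Gst.State.PLAYING)

# Wait for EOS (or an error) so both muxers can finalize their files.
bus = pipeline.get_bus()
bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE,
                       Gst.MessageType.EOS | Gst.MessageType.ERROR)
pipeline.set_state(Gst.State.NULL)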

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.