Cannot set nvstreammux batch-size

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU) Jetson tx2
• DeepStream Version 5.1
• JetPack Version (valid for Jetson only) 4.5.1

Hi.
Without nvinferserver: if nvstreammux batch-size = 4 and the number of input streams is 4, everything runs OK.
With nvinferserver (triton max_batch_size = 4): if nvstreammux batch-size = 1 and the number of input streams is < 4, everything runs OK.
With nvinferserver (triton max_batch_size = 4): if nvstreammux batch-size is > 1 and <= 4, there is an error:

NvMapMemCacheMaint Bad parameter
nvbusurface: NvBufSurfaceSyncForCpu: Error(4) in sync
libnvosd (127):(ERROR) : Cache sync failed 
libnvosd (234):(ERROR) : Error in nvll_osd_put_text0:00:07.239277293  9429   0x7eac019130 WARN           nvinferserver gstnvinferserver.cpp:506:gst_nvinfer_server_push_buffer:<primary-inference> error: Internal data stream error.
0:00:07.239315981  9429   0x7eac019130 WARN           nvinferserver gstnvinferserver.cpp:506:gst_nvinfer_server_push_buffer:<primary-inference> error: streaming stopped, reason error (-5)
Error: gst-resource-error-quark: Unable to draw text (1): /dvs/git/dirty/git-master_linux/deepstream/sdk/src/gst-plugins/gst-nvdsosd/gstnvdsosd.c(525): gst_nvds_osd_transform_ip (): /GstPipeline:pipeline0/GstNvDsOsd:onscreendisplay

Reproduce:
nvstreammux

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 4)
    streammux.set_property('batched-push-timeout', 200)

deepstream-triton config

max_batch_size: 4
  backend {
    inputs [
      {
        name: "data"
        dims: [3, 480, 640]
      }
    ]

After inspection, if I remove the code below (the drawing code) from the pgie callback function, it works normally.

for frame_object in frame_object_list:
            add_obj_meta_to_frame(
                frame_object, batch_meta, frame_meta, label_names)

Why?

Reproduce:

draw function

def add_obj_meta_to_frame(frame_object, batch_meta, frame_meta, label_names):
    """Attach one detected object (bbox + display text) to the frame metadata.

    Acquires an NvDsObjectMeta from the batch pool, fills in the bounding
    box, class id, confidence and on-screen display text, then inserts it
    into *frame_meta* so downstream elements (tracker, OSD) can consume it.

    NOTE(review): assumes frame_object coordinates are already in the
    streammux (input) resolution — confirm against the parser output.
    """
    obj_meta = pyds.nvds_acquire_obj_meta_from_pool(batch_meta)

    # Bounding box, in input resolution.
    rect = obj_meta.rect_params
    rect.left = int(frame_object.left)
    rect.top = int(frame_object.top)
    rect.width = int(frame_object.width)
    rect.height = int(frame_object.height)

    # Background fill is disabled (has_bg_color = 0); the yellow color is
    # set anyway so it can be enabled by flipping the flag.
    rect.has_bg_color = 0
    rect.bg_color.set(1, 1, 0, 0.4)

    # Red border, 3 px wide.
    rect.border_width = 3
    rect.border_color.set(1, 0, 0, 1)

    # Detection info; the tracker assigns a real object id downstream.
    obj_meta.confidence = frame_object.detectionConfidence
    obj_meta.class_id = frame_object.classId
    obj_meta.object_id = UNTRACKED_OBJECT_ID

    # Clamp out-of-range class ids to label 0.
    label_idx = frame_object.classId
    if label_idx >= len(label_names):
        label_idx = 0
    obj_meta.obj_label = label_names[label_idx]

    # On-screen text: "<label> <confidence>".
    text = obj_meta.text_params
    if text.display_text:
        # Release any string left over from a recycled pool entry.
        pyds.free_buffer(text.display_text)
    text.x_offset = int(rect.left)
    text.y_offset = max(0, int(rect.top) - 10)
    text.display_text = "{} {:04.3f}".format(
        label_names[label_idx], frame_object.detectionConfidence)

    # White 10pt serif font on a black background.
    text.font_params.font_name = "Serif"
    text.font_params.font_size = 10
    text.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
    text.set_bg_clr = 1
    text.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)

    # Insert into the current frame's metadata; this object has no parent.
    pyds.nvds_add_obj_meta_to_frame(frame_meta, obj_meta, None)

pgie callback

try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        print("frame_number: ", frame_number)

        l_user = frame_meta.frame_user_meta_list
        while l_user is not None:
            try:
                # Note that l_user.data needs a cast to pyds.NvDsUserMeta
                # The casting also keeps ownership of the underlying memory
                # in the C code, so the Python garbage collector will leave
                # it alone.
                user_meta = pyds.NvDsUserMeta.cast(l_user.data)
            except StopIteration:
                break

            if (
                    user_meta.base_meta.meta_type
                    != pyds.NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META
            ):
                continue

            tensor_meta = pyds.NvDsInferTensorMeta.cast(
                user_meta.user_meta_data)

            # Boxes in the tensor meta should be in network resolution which is
            # found in tensor_meta.network_info. Use this info to scale boxes to
            # the input frame resolution.
            layers_info = []
            for i in range(tensor_meta.num_output_layers):
                layer = pyds.get_nvds_LayerInfo(tensor_meta, i)
                layers_info.append(layer)

            frame_object_list, faces = nvds_infer_parse_custom_tf_ssd(
                layers_info, batch_meta, frame_meta, label_names
            )

            # if len(faces) > 0:
            #     batch_inferences[frame_meta.pad_index][frame_number] = faces
            print("faces: ", faces)
            # create_display_meta(faces, frame_meta)
            try:
                l_user = l_user.next
            except StopIteration:
                break

            for frame_object in frame_object_list:
                add_obj_meta_to_frame(
                    frame_object, batch_meta, frame_meta, label_names)

My fault. I have solved this problem.

Glad to know issue resolved, thanks for the update!