Custom payload for MQTT Broker

Please provide complete information as applicable to your setup.

• Hardware Platform (GPU)
• DeepStream Version 6.4
• NVIDIA GPU Driver Version (valid for GPU only) NVIDIA GeForce GTX 1650 / Driver Version: 525.147.05 / CUDA Version: 12.0
• Issue Type (questions, new requirements, bugs): question
• How to reproduce the issue? (This is for bugs. Include which sample app is used, the configuration file contents, the command line used, and other details for reproducing.)
• Requirement details (This is for new requirements. Include the module name, i.e. which plugin or sample application, and the function description.)

I am creating a simple application where I want to use nvmsgconv and nvmsgbroker.

A simple end-to-end test works and I can see the messages in the MQTT broker. Now I want to modify the output, and this is where it gets complicated and I am stuck.

I am working directly in the container. This is the code I use to send the messages to the MQTT broker:

def osd_sink_pad_buffer_probe(pad,info,u_data):
    frame_number=0
    num_rects=0
    # Per-class object counter. Its initialization was not shown in the original
    # post; the dict below is assumed (add entries for any other detected classes).
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
    }

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number=frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj=frame_meta.obj_meta_list

        while l_obj is not None:
            try:
                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.8) #0.8 is alpha (opacity)
            try: 
                l_obj=l_obj.next
            except StopIteration:
                break

        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        # Now the MQTT Broker message
        user_event_meta = pyds.nvds_acquire_user_meta_from_pool(batch_meta)

        if user_event_meta:
            msg_meta = pyds.alloc_nvds_event_msg_meta(user_event_meta)

            msg_meta.bbox.top = obj_meta.rect_params.top
            msg_meta.bbox.left = obj_meta.rect_params.left
            msg_meta.bbox.width = obj_meta.rect_params.width
            msg_meta.bbox.height = obj_meta.rect_params.height
            msg_meta.frameId = frame_number            

            msg_meta.confidence = obj_meta.confidence

            msg_meta = generate_event_msg_meta(msg_meta, obj_meta.class_id)

            user_event_meta.user_meta_data = msg_meta
            user_event_meta.base_meta.meta_type = pyds.NvDsMetaType.NVDS_EVENT_MSG_META
            pyds.nvds_add_user_meta_to_frame(frame_meta, user_event_meta) # This line sends data to msgbroker


        try:
            l_frame=l_frame.next
        except StopIteration:
            break
			
    return Gst.PadProbeReturn.OK	

I have found out that I need to modify eventmsg_payload.cpp in /opt/nvidia/deepstream/deepstream/sources/libs/nvmsgconv/deepstream_schema, and not dsmeta_payload.cpp.

I tried to modify the code there, but without any result. To check whether my changes were being picked up, I commented out certain lines, but the corresponding fields still appear in the output.

What are the steps, and which files do I need to modify, in order, for example, not to include the “source” field from “analyticsObj”?

  1. Which files to modify and where?
  2. Do I need to rebuild something?

Best regards
Oleg

  1. Please modify generate_event_message/generate_event_message_minimal in /opt/nvidia/deepstream/deepstream-6.4/sources/libs/nvmsgconv/nvmsgconv.cpp (see the sketch below).
  2. Yes, please rebuild /opt/nvidia/deepstream/deepstream/sources/libs/nvmsgconv according to the README, then replace /opt/nvidia/deepstream/deepstream/lib/libnvds_msgconv.so.
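
For the “source” field specifically: the payload code (nvmsgconv.cpp together with deepstream_schema/eventmsg_payload.cpp) assembles the JSON with json-glib, so dropping a field comes down to removing (or commenting out) the json_object_set_*_member() call that adds it and then rebuilding libnvds_msgconv.so. The snippet below is only a sketch of that pattern with placeholder values, not the verbatim SDK code; in the library the values come from the msgconv configuration.

#include <json-glib/json-glib.h>

/* Sketch of how an "analyticsModule"-style sub-object is built with json-glib.
 * All values here are placeholders. */
static JsonObject *
build_analytics_object_sketch (void)
{
  JsonObject *analyticsObj = json_object_new ();

  json_object_set_string_member (analyticsObj, "id", "analytics-0");
  json_object_set_string_member (analyticsObj, "description", "my analytics module");
  /* json_object_set_string_member (analyticsObj, "source", "my-source"); */
  /* ^ commented out: "source" no longer appears in the generated payload  */
  json_object_set_string_member (analyticsObj, "version", "1.0");

  return analyticsObj;
}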

Which readme?

/opt/nvidia/deepstream/deepstream/sources/libs/nvmsgconv/README

Thanks, just found it :) It was in front of my eyes and I did not see it… sorry…

I just did that, and it throws hundreds of lines of errors:

These are the first lines:

protoc -I./deepstream_schema --cpp_out=./deepstream_schema/ deepstream_schema/lidar_schema.proto deepstream_schema/schema.proto
g++ -o libnvds_msgconv.so nvmsgconv.cpp deepstream_schema/eventmsg_payload.cpp deepstream_schema/dsmeta_payload.cpp deepstream_schema/deepstream_schema.cpp deepstream_schema/schema.pb.cc -Wall -std=c++14 -shared -fPIC -I/opt/tritonclient/include -I../../includes -I./deepstream_schema -pthread -I/usr/include/json-glib-1.0 -I/usr/include/libmount -I/usr/include/blkid -I/usr/include/glib-2.0 -I/usr/lib/x86_64-linux-gnu/glib-2.0/include -I/usr/include/uuid -ljson-glib-1.0 -lgio-2.0 -lgobject-2.0 -lglib-2.0 -luuid -L/opt/tritonclient/lib -lyaml-cpp -lprotobuf
In file included from nvmsgconv.cpp:23:
./deepstream_schema/schema.pb.h:17:2: error: #error This file was generated by an older version of protoc which is
   17 | #error This file was generated by an older version of protoc which is
      |  ^~~~~
./deepstream_schema/schema.pb.h:18:2: error: #error incompatible with your Protocol Buffer headers. Please
   18 | #error incompatible with your Protocol Buffer headers. Please
      |  ^~~~~
./deepstream_schema/schema.pb.h:19:2: error: #error regenerate this file with a newer version of protoc.
   19 | #error regenerate this file with a newer version of protoc.
      |  ^~~~~
In file included from nvmsgconv.cpp:23:
./deepstream_schema/schema.pb.h:52:51: error: 'AuxillaryParseTableField' in namespace 'google::protobuf::internal' does not name a type; did you mean 'AuxiliaryParseTableField'?
   52 |   static const ::PROTOBUF_NAMESPACE_ID::internal::AuxillaryParseTableField aux[]
      |                                                   ^~~~~~~~~~~~~~~~~~~~~~~~
      |                                                   AuxiliaryParseTableField
In file included from nvmsgconv.cpp:23:
./deepstream_schema/schema.pb.h:220:17: error: 'nv::Frame* nv::Frame::New() const' marked 'final', but is not virtual
  220 |   inline Frame* New() const final {
      |                 ^~~
./deepstream_schema/schema.pb.h:408:7: error: wrong number of template arguments (6, should be 5)
  408 |     0 > {
      |       ^
In file included from /opt/tritonclient/include/google/protobuf/generated_message_table_driven.h:35,
                 from ./deepstream_schema/schema.pb.h:26,
                 from nvmsgconv.cpp:23:
/opt/tritonclient/include/google/protobuf/map_entry_lite.h:59:7: note: provided for 'template<class Derived, class Key, class Value, google::protobuf::internal::WireFormatLite::FieldType kKeyFieldType, google::protobuf::internal::WireFormatLite::FieldType kValueFieldType> class google::protobuf::internal::MapEntry'
   59 | class MapEntry;
      |       ^~~~~~~~
In file included from nvmsgconv.cpp:23:
./deepstream_schema/schema.pb.h:414:7: error: wrong number of template arguments (6, should be 5)
  414 |     0 > SuperType;
      |       ^
In file included from /opt/tritonclient/include/google/protobuf/generated_message_table_driven.h:35,
                 from ./deepstream_schema/schema.pb.h:26,
                 from nvmsgconv.cpp:23:
/opt/tritonclient/include/google/protobuf/map_entry_lite.h:59:7: note: provided for 'template<class Derived, class Key, class Value, google::protobuf::internal::WireFormatLite::FieldType kKeyFieldType, google::protobuf::internal::WireFormatLite::FieldType kValueFieldType> class google::protobuf::internal::MapEntry'
   59 | class MapEntry;
      |       ^~~~~~~~
In file included from nvmsgconv.cpp:23:
./deepstream_schema/schema.pb.h:425:8: error: 'void nv::Object_InfoEntry_DoNotUse::MergeFrom(const google::protobuf::Message&)' marked 'final', but is not virtual
  425 |   void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& other) final;
      |        ^~~~~~~~~
./deepstream_schema/schema.pb.h:426:37: error: 'google::protobuf::Metadata nv::Object_InfoEntry_DoNotUse::GetMetadata() const' marked 'final', but is not virtual
  426 |   ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;
      |                                     ^~~~~~~~~~~
./deepstream_schema/schema.pb.h:811:9: error: wrong number of template arguments (6, should be 5)
  811 |       0 > info_;
      |         ^

please refer to this topic.

I could run the make command successfully, but now I get an error when starting the pipeline:

(python3:221): GStreamer-WARNING **: 14:42:42.811: Failed to load plugin '/usr/lib/x86_64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_msgconv.so': ///opt/nvidia/deepstream/deepstream-6.4/lib/libnvds_msgconv.so: undefined symbol: _ZN7ds3dmsg15LidarPointCloudD1Ev

(python3:221): GStreamer-CRITICAL **: 14:42:42.811: gst_element_factory_create_valist: assertion 'newfactory != NULL' failed
 Unable to create msgconv
Traceback (most recent call last):
  File "/usr/local/bin/inro-pipeline", line 8, in <module>
    sys.exit(cli())
  File "/opt/nvidia/deepstream/deepstream-6.4/samples/WRS_AI_Application/deepstream/app/intro_3.py", line 474, in cli
    main(args=args)
  File "/opt/nvidia/deepstream/deepstream-6.4/samples/WRS_AI_Application/deepstream/app/intro_3.py", line 336, in main
    msgconv.set_property('config', '/opt/nvidia/deepstream/deepstream/samples/WRS_AI_Application/deepstream/cfg_msgconv.txt')
AttributeError: 'NoneType' object has no attribute 'set_property'

What I did: I replaced the last line of the Makefile as suggested in the thread. Here is the output of the make process:
output_make.txt (9.7 KB)

Then I moved the file to the target directory and started my pipeline.

mv -f /opt/nvidia/deepstream/deepstream-6.4/sources/libs/nvmsgconv/libnvds_msgconv.so /opt/nvidia/deepstream/deepstream/lib/libnvds_msgconv.so

For testing purposes, I did not make any modifications to the files that change the output; I left them as they are.

I am working directly inside the docker container.

Best regards

please refer to this topic.

Finally worked :) Thanks!

Is there an option to add all of this to the README? Or are you planning to modify the next Docker version so that these changes are no longer required?

Yes, these are known issues. They will be fixed in a later version.

In my project I have different objects, which are not persons or vehicles or anything similar defined in eventmsg_payload.cpp. I want to add a new object to nvmsgconv; how would I do that?

So, for example, I want to add an object “Dog”. Where and how would I do that?

Which files do I need to modify?

Please refer to deepstream-test4; first you need to set meta->objType to “dog”, then you need to modify generate_object_object to convert the new data to JSON format.
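
As a rough illustration (using a hypothetical NvDsDogObject that is not shipped with the SDK), the extra handling in generate_object_object() mirrors the existing vehicle/person cases: cast meta->extMsg to your struct and copy its fields into a JSON sub-object that is attached to the parent "object" member. Sketch:

#include <json-glib/json-glib.h>
#include <glib.h>

/* Hypothetical extended-object struct for a "dog" detection; in practice it
 * would be declared next to NvDsVehicleObject in nvdsmeta_schema.h. */
typedef struct NvDsDogObject {
  gchar *breed;
  gchar *color;
} NvDsDogObject;

/* Sketch of what an extra case in the switch over meta->objType in
 * generate_object_object() would do with the attached extMsg data. */
static void
add_dog_member_sketch (JsonObject *objectObj, const NvDsDogObject *dsObj)
{
  JsonObject *jobject = json_object_new ();

  if (dsObj) {
    json_object_set_string_member (jobject, "breed", dsObj->breed ? dsObj->breed : "");
    json_object_set_string_member (jobject, "color", dsObj->color ? dsObj->color : "");
  }

  json_object_set_object_member (objectObj, "dog", jobject);
}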

I already checked it, but I do not really understand it…

It uses pyds.alloc_nvds_vehicle_object(), and I do not understand what it should look like if I have a custom object.

Can you please provide a better example?

Many thanks in advance for your support! :)

Please refer to alloc_nvds_vehicle_object to bind a new function alloc_nvds_dog_object for the dog object.
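
A minimal sketch of what that binding could look like in bindschema.cpp, again for the hypothetical NvDsDogObject (the shipped bindings additionally use the STRING_PROPERTY helper and docstrings, as in the NvDsVehicleObject binding quoted below):

#include <pybind11/pybind11.h>
#include <glib.h>
#include <string>

namespace py = pybind11;

/* Same hypothetical struct as above; normally declared in nvdsmeta_schema.h
 * so that nvmsgconv and the Python bindings share it. */
typedef struct NvDsDogObject {
  gchar *breed;
  gchar *color;
} NvDsDogObject;

/* Sketch of the extra bindings to add inside the module definition in
 * bindschema.cpp, modeled on NvDsVehicleObject / alloc_nvds_vehicle_object. */
static void bind_dog_object (py::module &m)
{
  py::class_<NvDsDogObject>(m, "NvDsDogObject")
      .def(py::init<>())
      /* string properties spelled out here; the real bindings use STRING_PROPERTY */
      .def_property("breed",
          [](const NvDsDogObject &o) { return o.breed ? std::string (o.breed) : std::string (); },
          [](NvDsDogObject &o, const std::string &v) { g_free (o.breed); o.breed = g_strdup (v.c_str ()); })
      .def_property("color",
          [](const NvDsDogObject &o) { return o.color ? std::string (o.color) : std::string (); },
          [](NvDsDogObject &o, const std::string &v) { g_free (o.color); o.color = g_strdup (v.c_str ()); })
      .def("cast",
           [](size_t data) { return (NvDsDogObject *) data; },
           py::return_value_policy::reference);

  m.def("alloc_nvds_dog_object",
        []() { return (NvDsDogObject *) g_malloc0 (sizeof (NvDsDogObject)); },
        py::return_value_policy::reference);
}

On the Python side the flow is then the same as for the vehicle object in deepstream-test4: allocate with pyds.alloc_nvds_dog_object(), cast with pyds.NvDsDogObject.cast(), set the fields, and attach it via meta.extMsg with a matching meta.extMsgSize.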

OK, and as soon as I am done, I need to rebuild with “make” again? Is there anything else I need to add or modify?

You need to rebuild and reinstall the .whl file. Please refer to this link.

I had a look, but how do I modify this line of code?

pydsdoc::metaschema::VehicleObjectDoc::cast

in

    py::class_<NvDsVehicleObject>(m, "NvDsVehicleObject",
                                  pydsdoc::metaschema::VehicleObjectDoc::descr)
            .def(py::init<>())
            .def_property("type", STRING_PROPERTY(NvDsVehicleObject, type))
            .def_property("make", STRING_PROPERTY(NvDsVehicleObject, make))
            .def_property("model",
                          STRING_PROPERTY(NvDsVehicleObject, model))
            .def_property("color",
                          STRING_PROPERTY(NvDsVehicleObject, color))

            .def("cast",
                 [](void *data) {
                     return (NvDsVehicleObject *) data;
                 },
                 py::return_value_policy::reference,
                 pydsdoc::metaschema::VehicleObjectDoc::cast)

            .def("cast",
                 [](size_t data) {
                     return (NvDsVehicleObject *) data;
                 },
                 py::return_value_policy::reference,
                 pydsdoc::metaschema::VehicleObjectDoc::cast)

            .def_property("region",
                          STRING_PROPERTY(NvDsVehicleObject, region))
            .def_property("license",
                          STRING_PROPERTY(NvDsVehicleObject, license));

… if I want to create a new object…

Could you please provide an example, step by step, of what to modify?

Best regards and many thanks for your help.

I have modified/added the following snippets to the files listed below:

/opt/nvidia/deepstream/deepstream/sources/deepstream_python_apps/bindings/src/bindschema.cpp

        py::class_<NvDsCustomObject>(m, "NvDsCustomObject",
                                      pydsdoc::metaschema::CustomObjectDoc::descr)
                 .def(py::init<>())
                 .def_property("var1", STRING_PROPERTY(NvDsCustomObject, var1))
                 .def_property("var2", STRING_PROPERTY(NvDsCustomObject, var2))  
                 .def("cast",
                    [](void *data) {
                          return (NvDsCustomObject *) data;
                    },
                    py::return_value_policy::reference,
                    pydsdoc::metaschema::CustomObjectDoc::cast)  

                 .def("cast",
                    [](size_t data) {
                          return (NvDsCustomObject *) data;
                    },
                    py::return_value_policy::reference,
                    pydsdoc::metaschema::CustomObjectDoc::cast);

        m.def("alloc_nvds_custom_object",
              []() {
                 auto *object = (NvDsCustomObject *) g_malloc0(
                          sizeof(NvDsCustomObject));
                 return object;
              },
              py::return_value_policy::reference,
              pydsdoc::methodsDoc::alloc_nvds_custom_object);  

/opt/nvidia/deepstream/deepstream/sources/deepstream_python_apps/bindings/docstrings/schemadoc.h

      namespace CustomObjectDoc   
      {
         constexpr const char* descr = R"pyds(
               Holds bla... .
               
               :ivar var1: *str*, abc of the object.
               :ivar var2: *str*, def of the object.)pyds";


         constexpr const char* cast=R"pyds(cast given object/data to :class:`NvDsCustomObject`, call pyds.NvDsCustomObject.cast(data))pyds";
      }

/opt/nvidia/deepstream/deepstream/sources/deepstream_python_apps/bindings/docstrings/functionsdoc.h

         constexpr const char* alloc_nvds_custom_object=R"pyds( 
            Allocate an :class:`NvDsCustomObject`. 

            :returns: Allocated :class:`NvDsCustomObject`)pyds";

/opt/nvidia/deepstream/deepstream/sources/libs/nvmsgconv/deepstream_schema/eventmsg_payload.cpp

    case NVDS_OBJECT_TYPE_CUSTOM:
      if (meta->extMsgSize) {
        NvDsCustomObject *dsObj = (NvDsCustomObject *) meta->extMsg;
        if (dsObj) {
          json_object_set_string_member (jobject, "var1", dsObj->var1);
          json_object_set_string_member (jobject, "var2", dsObj->var2);
          // json_object_set_double_member (jobject, "ghi", meta->ghi);
        }
      } else {
        // No object in meta data. Attach empty sub object.
        json_object_set_string_member (jobject, "var1", "");
        json_object_set_string_member (jobject, "var2", "");
        // json_object_set_double_member (jobject, "ghi", 42);
      }
      json_object_set_object_member (objectObj, "custom", jobject);
      break;

/opt/nvidia/deepstream/deepstream/sources/includes/nvdsmeta_schema.h

typedef struct NvDsCustomObject {
  gchar *var1;
  gchar *var2;
} NvDsCustomObject;

/opt/nvidia/deepstream/deepstream/sources/deepstream_python_apps/bindings/src/bindfunctions.cpp

        /**
         * Type casting to @NvDsCustomObject
         */
        m.def("glist_get_nvds_custom_object",
              [](void *data) {
                  return (NvDsCustomObject *) data;
              },
              py::return_value_policy::reference);

After that, I:

  1. rebuilt the Python wheel and reinstalled it
  2. built a new libnvds_msgconv.so

Then I modified the function that generates the message:

def generate_event_msg_meta(data, class_id):
    
    meta = pyds.NvDsEventMsgMeta.cast(data)
    # meta.sensorId = 0
    # meta.placeId = 0
    # meta.moduleId = 0
    # meta.sensorStr = "sensor-0"
    meta.ts = pyds.alloc_buffer(MAX_TIME_STAMP_LEN + 1)
    pyds.generate_ts_rfc3339(meta.ts, MAX_TIME_STAMP_LEN)

    # meta.type = pyds.NvDsEventType.NVDS_EVENT_MOVING
    meta.objType = pyds.NvDsObjectType.NVDS_OBJECT_TYPE_CUSTOM

    data = pyds.alloc_nvds_custom_object()
    obj = pyds.NvDsCustomObject.cast(data)

    obj.var1 = "hello"
    obj.var2 = "world"

    meta.extMsg = obj
    meta.extMsgSize = sys.getsizeof(pyds.NvDsCustomObject)
    
    print(meta)
    return meta


However, it gets stuck after the first inference and then crashes with a segmentation fault:

0:00:06.812640531   547 0x56491292a760 DEBUG               GST_PADS gstpad.c:3713:probe_hook_marshal:<onscreendisplay:sink> probe returned OK
0:00:06.812670559   547 0x56491292a760 DEBUG         GST_SCHEDULING gstpad.c:4443:gst_pad_chain_data_unchecked:<onscreendisplay:sink> calling chainfunction &gst_base_transform_chain with buffer buffer: 0x5649519a26c0, pts 0:00:00.000000000, dts 99:99:99.999999999, dur 99:99:99.999999999, size 64, offset none, offset_end none, flags 0x0
0:00:06.812684316   547 0x56491292a760 DEBUG          basetransform gstbasetransform.c:2047:default_submit_input_buffer:<onscreendisplay> handling buffer 0x5649519a26c0 of size 64, PTS 0:00:00.000000000 and offset NONE
0:00:06.812691927   547 0x56491292a760 DEBUG          basetransform gstbasetransform.c:2162:default_generate_output:<onscreendisplay> calling prepare buffer
0:00:06.812698142   547 0x56491292a760 DEBUG          basetransform gstbasetransform.c:1652:default_prepare_output_buffer:<onscreendisplay> passthrough: reusing input buffer
0:00:06.812705893   547 0x56491292a760 DEBUG          basetransform gstbasetransform.c:2168:default_generate_output:<onscreendisplay> using allocated buffer in 0x5649519a26c0, out 0x5649519a26c0
0:00:06.812711969   547 0x56491292a760 DEBUG          basetransform gstbasetransform.c:2177:default_generate_output:<onscreendisplay> doing passthrough transform_ip
0:00:06.830949583   547 0x56491292a760 DEBUG         GST_SCHEDULING gstpad.c:4443:gst_pad_chain_data_unchecked:<nvsink-tee:sink> calling chainfunction &gst_tee_chain with buffer buffer: 0x5649519a26c0, pts 0:00:00.000000000, dts 99:99:99.999999999, dur 99:99:99.999999999, size 64, offset none, offset_end none, flags 0x0
0:00:06.830970672   547 0x56491292a760 DEBUG                    tee gsttee.c:1024:gst_tee_chain:<nvsink-tee> received buffer 0x5649519a26c0
0:00:06.830982893   547 0x56491292a760 DEBUG         GST_SCHEDULING gstpad.c:4443:gst_pad_chain_data_unchecked:<nvtee-que1:sink> calling chainfunction &gst_queue_chain with buffer buffer: 0x5649519a26c0, pts 0:00:00.000000000, dts 99:99:99.999999999, dur 99:99:99.999999999, size 64, offset none, offset_end none, flags 0x0
0:00:06.830996789   547 0x56491292a760 DEBUG         GST_SCHEDULING gstpad.c:4449:gst_pad_chain_data_unchecked:<nvtee-que1:sink> called chainfunction &gst_queue_chain with buffer 0x5649519a26c0, returned ok
0:00:06.831008311   547 0x56491292a760 DEBUG         GST_SCHEDULING gstpad.c:4443:gst_pad_chain_data_unchecked:<nvtee-que2:sink> calling chainfunction &gst_queue_chain with buffer buffer: 0x5649519a26c0, pts 0:00:00.000000000, dts 99:99:99.999999999, dur 99:99:99.999999999, size 64, offset none, offset_end none, flags 0x0
0:00:06.831016761   547 0x56491292a760 DEBUG         GST_SCHEDULING gstpad.c:4449:gst_pad_chain_data_unchecked:<nvtee-que2:sink> called chainfunction &gst_queue_chain with buffer 0x5649519a26c0, returned ok
0:00:06.831023814   547 0x56491292a760 DEBUG                    tee gsttee.c:1028:gst_tee_chain:<nvsink-tee> handled buffer ok
0:00:06.831021440   547 0x56491292a800 DEBUG         queue_dataflow gstqueue.c:1532:gst_queue_loop:<nvtee-que1> queue is not empty
0:00:06.831031426   547 0x56491292a760 DEBUG         GST_SCHEDULING gstpad.c:4449:gst_pad_chain_data_unchecked:<nvsink-tee:sink> called chainfunction &gst_tee_chain with buffer 0x5649519a26c0, returned ok
0:00:06.831062431   547 0x56491292a800 DEBUG         GST_SCHEDULING gstpad.c:4443:gst_pad_chain_data_unchecked:<nvmsg-converter:sink> calling chainfunction &gst_base_transform_chain with buffer buffer: 0x5649519a26c0, pts 0:00:00.000000000, dts 99:99:99.999999999, dur 99:99:99.999999999, size 64, offset none, offset_end none, flags 0x0
0:00:06.831064805   547 0x56491292a760 DEBUG         GST_SCHEDULING gstpad.c:4449:gst_pad_chain_data_unchecked:<onscreendisplay:sink> called chainfunction &gst_base_transform_chain with buffer 0x5649519a26c0, returned ok
0:00:06.831079679   547 0x56491292a800 DEBUG          basetransform gstbasetransform.c:2047:default_submit_input_buffer:<nvmsg-converter> handling buffer 0x5649519a26c0 of size 64, PTS 0:00:00.000000000 and offset NONE
0:00:06.831100698   547 0x56491292a800 DEBUG          basetransform gstbasetransform.c:2162:default_generate_output:<nvmsg-converter> calling prepare buffer
0:00:06.831091481   547 0x56491292a760 DEBUG         GST_SCHEDULING gstpad.c:4449:gst_pad_chain_data_unchecked:<convertor:sink> called chainfunction &gst_base_transform_chain with buffer 0x5649519a26c0, returned ok
0:00:06.831108240   547 0x56491292a800 DEBUG          basetransform gstbasetransform.c:1652:default_prepare_output_buffer:<nvmsg-converter> passthrough: reusing input buffer
0:00:06.831125838   547 0x56491292a800 DEBUG          basetransform gstbasetransform.c:2168:default_generate_output:<nvmsg-converter> using allocated buffer in 0x5649519a26c0, out 0x5649519a26c0
0:00:06.831133100   547 0x56491292a800 DEBUG          basetransform gstbasetransform.c:2177:default_generate_output:<nvmsg-converter> doing passthrough transform_ip
0:00:06.831142109   547 0x56491292a800 DEBUG              nvmsgconv gstnvmsgconv.cpp:724:gst_nvmsgconv_transform_ip_video_audio:<nvmsg-converter> transform_ip
No entry for place0 in configuration file
No entry for sensor0 in configuration file
0:00:06.832205991   547 0x56491292a760 DEBUG         GST_SCHEDULING gstpad.c:4443:gst_pad_chain_data_unchecked:<convertor:sink> calling chainfunction &gst_base_transform_chain with buffer buffer: 0x5649519a2480, pts 0:00:00.033333333, dts 99:99:99.999999999, dur 99:99:99.999999999, size 64, offset none, offset_end none, flags 0x0
Segmentation fault (core dumped)

This is my pipeline: graph.pdf

Please help, totally lost…

I also modified further lines in bindschema.cpp, but no progress…

###
        if (srcData->extMsgSize > 0) {
            if (srcData->objType == NVDS_OBJECT_TYPE_VEHICLE) {
                NvDsVehicleObject *srcObj = (NvDsVehicleObject *) srcData->extMsg;
                NvDsVehicleObject *obj =
                        (NvDsVehicleObject *) g_malloc0 (sizeof (NvDsVehicleObject));
                if (srcObj->type)
                obj->type = g_strdup (srcObj->type);
                if (srcObj->make)
                obj->make = g_strdup (srcObj->make);
                if (srcObj->model)
                obj->model = g_strdup (srcObj->model);
                if (srcObj->color)
                obj->color = g_strdup (srcObj->color);
                if (srcObj->license)
                obj->license = g_strdup (srcObj->license);
                if (srcObj->region)
                obj->region = g_strdup (srcObj->region);
                destData->extMsg = obj;
                destData->extMsgSize = sizeof (NvDsVehicleObject);
            } 
            if (srcData->objType == NVDS_OBJECT_TYPE_PERSON) {
                NvDsPersonObject *srcObj = (NvDsPersonObject *) srcData->extMsg;
                NvDsPersonObject *obj =
                    (NvDsPersonObject *) g_malloc0 (sizeof (NvDsPersonObject));
                obj->age = srcObj->age;
                if (srcObj->gender)
                    obj->gender = g_strdup (srcObj->gender);
                if (srcObj->cap)
                    obj->cap = g_strdup (srcObj->cap);
                if (srcObj->hair)
                    obj->hair = g_strdup (srcObj->hair);
                if (srcObj->apparel)
                    obj->apparel = g_strdup (srcObj->apparel);
                destData->extMsg = obj;
                destData->extMsgSize = sizeof (NvDsPersonObject);
            }
            if (srcData->objType == NVDS_OBJECT_TYPE_CUSTOM) {
                NvDsCustomObject *srcObj = (NvDsCustomObject *) srcData->extMsg;
                NvDsCustomObject *obj =
                    (NvDsCustomObject *) g_malloc0 (sizeof (NvDsCustomObject));
                if (srcObj->var1)
                    obj->var1 = g_strdup (srcObj->var1);
                if (srcObj->var2)
                    obj->var2 = g_strdup (srcObj->var2);
                destData->extMsg = obj;
                destData->extMsgSize = sizeof (NvDsCustomObject);
            }
        }

###

                if (srcData->extMsgSize > 0) {
                    if (srcData->objType == NVDS_OBJECT_TYPE_VEHICLE) {
                        NvDsVehicleObject *obj = (NvDsVehicleObject *) srcData->extMsg;
                        if (obj->type)
                            g_free (obj->type);
                        if (obj->color)
                            g_free (obj->color);
                        if (obj->make)
                            g_free (obj->make);
                        if (obj->model)
                            g_free (obj->model);
                        if (obj->license)
                            g_free (obj->license);
                        if (obj->region)
                            g_free (obj->region);
                    }
                    if (srcData->objType == NVDS_OBJECT_TYPE_PERSON) {
                        NvDsPersonObject *obj = (NvDsPersonObject *) srcData->extMsg;
                        if (obj->gender)
                            g_free (obj->gender);
                        if (obj->cap)
                            g_free (obj->cap);
                        if (obj->hair)
                            g_free (obj->hair);
                        if (obj->apparel)
                            g_free (obj->apparel);
                    }
                    if (srcData->objType == NVDS_OBJECT_TYPE_CUSTOM) {
                        NvDsCustomObject *obj = (NvDsCustomObject *) srcData->extMsg;
                        if (obj->var1)
                            g_free (obj->var1);
                        if (obj->var2)
                            g_free (obj->var2);
                    }
                    g_free (srcData->extMsg);
                    srcData->extMsgSize = 0;
                }

@curious_cat

Could you please create your own topic and delete your questions? Your questions have nothing to do with this post!