Integrate back-to-back detectors with the DeepStream test5 application

+-----------------------------------------------------------------------------+
| NVIDIA-SMI 520.56.06    Driver Version: 520.56.06    CUDA Version: 11.8     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  NVIDIA GeForce ...  Off  | 00000000:01:00.0 Off |                  N/A |
| N/A   35C    P3    N/A /  N/A |      5MiB /  6144MiB |      0%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes:                                                                   |
|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
|        ID   ID                                                   Usage      |
|==============================================================================|
|    0   N/A  N/A      2491      G   /usr/lib/xorg/Xorg                  4MiB |
+-----------------------------------------------------------------------------+

DeepStream version: 6.2

We want to implement back-to-back detectors for the DeepStream test5 application. When we worked from the back-to-back detectors reference app linked above, we were not able to visualize any results. We need your guidance on integrating these back-to-back detectors with the test5 app. Thank you!

Are there no bounding boxes at all? How many models are there? Is the second model based on the first model's output?
If the first model produces no bbox output, you need to check whether the preprocessing parameters are correct. If it still fails, please share the first model's configuration file.

Below is our sample config file. We need to understand how to add two models as primary detectors; both models have to take the whole frame as input. The back-to-back detectors documentation suggests some ways to do this, but we are not sure how to use it in the DeepStream test5 app. I am also attaching sample reference config files showing how we want the two primary gies.

##config_infer.txt ##
[application]
enable-perf-measurement = 1
perf-measurement-interval-sec = 5

[tiled-display]
enable = 0
rows = 2
columns = 2
width = 1280
height = 720
gpu-id = 0
nvbuf-memory-type = 0

[osd]
enable = 1
gpu-id = 0
border-width = 1
text-size = 15
text-color = 1;1;1;1;
text-bg-color = 0.3;0.3;0.3;1
font = Arial
show-clock = 0
clock-x-offset = 800
clock-y-offset = 820
clock-text-size = 12
clock-color = 1;0;0;0
nvbuf-memory-type = 0

[streammux]
gpu-id=0
##Boolean property to inform muxer that sources are live
live-source=0
batch-size=1
##time out in usec, to wait after the first buffer is available
##to push the batch even if the complete batch is not formed
batched-push-timeout=40000

## Set muxer output width and height

width=1920
height=1080
#enable to maintain aspect ratio wrt source, and allow black borders, works
##along with width, height properties
enable-padding=0
nvbuf-memory-type=0

# config-file property is mandatory for any gie section.
# Other properties are optional and if set will override the properties set in
# the infer config file.

[primary-gie0]
enable=0
gpu-id=0
#model-engine-file=model_b1_gpu0_int8.engine
labelfile-path=rac_labels.txt
batch-size=1
#Required by the app for OSD, not a plugin property
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;0;1;1
bbox-border-color3=0;1;0;1
interval=0
gie-unique-id=1
nvbuf-memory-type=0
config-file=config_infer_primary_rac.txt

[primary-gie1]
enable=1
gpu-id=0
#model-engine-file=model_b1_gpu0_int8.engine
labelfile-path=labels.txt
batch-size=1
#Required by the app for OSD, not a plugin property
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;0;1;1
bbox-border-color3=0;1;0;1
interval=0
gie-unique-id=2
nvbuf-memory-type=0
config-file=config_infer_primary_rac_1.txt

[tracker]
enable=1

# For NvDCF and DeepSORT tracker, tracker-width and tracker-height must be a multiple of 32, respectively

tracker-width=640
tracker-height=384
ll-lib-file=/opt/nvidia/deepstream/deepstream-6.2/lib/libnvds_nvmultiobjecttracker.so

# ll-config-file required to set different tracker types
# ll-config-file=../../samples/configs/deepstream-app/config_tracker_IOU.yml
# ll-config-file=../../samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml
# ll-config-file=../../samples/configs/deepstream-app/config_tracker_NvDCF_accuracy.yml
# ll-config-file=../../samples/configs/deepstream-app/config_tracker_DeepSORT.yml

gpu-id=0
enable-batch-process=1
enable-past-frame=1
display-tracking-id=1

[tests]
file-loop=0

[source0]
enable = 1
type = 3
uri=file:///opt/nvidia/deepstream/deepstream-6.2/sources/apps/sample_apps/deepstreamtest5/videos/sample.mp4

gpu-id = 0
nvbuf-memory-type = 0
latency = 200

[sink0]
enable = 1
type = 4
codec = 1
sync = 0
qos = 1
#encoder type 0=Hardware 1=Software
enc-type=1
source-id = 0
gpu-id = 0
bitrate = 1000000
rtsp-port = 8554
#width = 640
#height = 480
#udp-port = 5400

[sink6]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvoverlaysink 6=MsgConvBroker
type=6
msg-conv-config=dstest5_msgconv_sample_config.txt
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM - Custom schema payload
msg-conv-payload-type=1
msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream-6.2/lib/libnvds_kafka_proto.so
#msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream-6.2/lib/libnvds_msgconv.so
#Provide your msg-broker-conn-str here
msg-broker-conn-str=192.168.0.118;9092
topic=quickstart-events
#Optional:
#msg-broker-config=../../deepstream-test4/cfg_kafka.txt

### config_infer_primary.txt ### - The primary detector config file
[property]
gpu-id=0
net-scale-factor=0.0039215697906911373
model-color-format=0
custom-network-config=/opt/nvidia/deepstream/deepstream-6.2/sources/apps/sample_apps/deepstreamtest5/models/yolov3.cfg
model-file=/opt/nvidia/deepstream/deepstream-6.2/sources/apps/sample_apps/deepstreamtest5/models/yolov3.weights
#model-engine-file=models/model_b1_gpu0_fp16.engine
model-engine-file=/opt/nvidia/deepstream/deepstream-6.2/sources/apps/sample_apps/deepstreamtest5/model_b1_gpu0_fp16.engine
#int8-calib-file=calib.table
labelfile-path=labels.txt
batch-size=1
network-mode=2
num-detected-classes=80
interval=0
gie-unique-id=2
process-mode=1
network-type=0
cluster-mode=4
#cluster-mode=2
maintain-aspect-ratio=1
parse-bbox-func-name=NvDsInferParseYolo
custom-lib-path=/opt/nvidia/deepstream/deepstream-6.2/sources/apps/sample_apps/deepstreamtest5/nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so
engine-create-func-name=NvDsInferYoloCudaEngineGet
#filter-out-class-ids=1;2;3;4;5;6;7;8;9;10;11;12;13;14;15;16;17;18;19;20;21;22;23;24;25;26;27;28;29;30;31;32;33;34;35;36;37;38;39;40;41;42;43;44;45#;46;47;48;49;50;51;52;53;54;55;56;57;58;59;60;61;62;63>
[class-attrs-all]
#pre-cluster-threshold=0
nms-iou-threshold=0.45
pre-cluster-threshold=0.25
topk=300
config_infer.txt (5.6 KB)
config_infer_primary.txt (1.2 KB)

The deepstream-app code is open source. Currently deepstream-app only supports one pgie and many sgies; you can find NvDsPrimaryGieBin primary_gie_bin in the SDK.
The sgie's process-mode is hardcoded to 2, so it can't process the whole frame.
You can modify deepstream-test2 to customize this.
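
As a rough sketch of that suggestion (not the reference implementation): a deepstream-test2-style pipeline can chain two nvinfer instances and run both in full-frame mode. The element and property names below are standard nvinfer ones; the variable names, config file names, and the surrounding pipeline/streammux elements are assumptions taken from how deepstream-test2 already sets things up.

/* Hypothetical fragment for a deepstream-test2-style main(): two detectors
 * that both see the whole frame, chained back to back. The config file
 * names are placeholders for the user's own nvinfer configs; "pipeline"
 * and "streammux" are assumed to exist as in deepstream-test2. */
GstElement *pgie1 = gst_element_factory_make ("nvinfer", "primary-infer-1");
GstElement *pgie2 = gst_element_factory_make ("nvinfer", "primary-infer-2");

g_object_set (G_OBJECT (pgie1),
    "config-file-path", "config_infer_primary_rac.txt",
    "unique-id", 1, NULL);

/* process-mode=1 makes the second nvinfer operate on the full frame instead
 * of on the objects found by the first detector; unique-id must differ. */
g_object_set (G_OBJECT (pgie2),
    "config-file-path", "config_infer_primary_rac_1.txt",
    "process-mode", 1,
    "unique-id", 2, NULL);

gst_bin_add_many (GST_BIN (pipeline), pgie1, pgie2, NULL);

/* ... streammux -> pgie1 -> pgie2 -> tracker -> osd -> sink ... */
gst_element_link_many (streammux, pgie1, pgie2, NULL);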

Can you please tell me where I can find this NvDsPrimaryGieBin primary_gie_bin? Also, please suggest which SDK sample can give inference with both primary and secondary detectors.

deepstream-app is based on the DeepStream SDK. You can find NvDsPrimaryGieBin primary_gie_bin in /opt/nvidia/deepstream/deepstream-6.2/sources/apps/sample_apps/deepstream-app/deepstream_app.h.
Please modify deepstream-test2 to do inference with the two models. The path is /opt/nvidia/deepstream/deepstream-6.2/sources/apps/sample_apps/deepstream-test2.

[property]
gpu-id=0
net-scale-factor=1
model-file=../../models/Secondary_VehicleTypes/resnet18.caffemodel
proto-file=../../models/Secondary_VehicleTypes/resnet18.prototxt
model-engine-file=../../models/Secondary_VehicleTypes/resnet18.caffemodel_b16_gpu0_int8.engine
int8-calib-file=../../models/Secondary_VehicleTypes/cal_trt.bin
mean-file=../../models/Secondary_VehicleTypes/mean.ppm
labelfile-path=../../models/Secondary_VehicleTypes/labels.txt
force-implicit-batch-dim=1
batch-size=16
model-color-format=1

# 0=FP32, 1=INT8, 2=FP16 mode

network-mode=1
is-classifier=1
#process-mode=2
process-mode=1
output-blob-names=predictions/Softmax
classifier-async-mode=1
classifier-threshold=0.51
input-object-min-width=128
input-object-min-height=128
operate-on-gie-id=1
operate-on-class-ids=0
classifier-type=vehicletype
#scaling-filter=0
#scaling-compute-hw=0

Do you mean that changing process-mode=1 in the secondary config file makes the secondary model access the whole frame?

No. Please refer to create_secondary_gie in /opt/nvidia/deepstream/deepstream-6.2/sources/apps/apps-common/src/deepstream_secondary_gie_bin.c; the sgie's process-mode is hardcoded to 2 there.
deepstream-app and the DeepStream SDK apps are open source, so you can modify the code.
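For reference, the hardcoded setting being described is a g_object_set call on the sgie element inside create_secondary_gie. The sketch below paraphrases that pattern (the variable and field names are approximations, not the verbatim SDK code); changing the value from 2 to 1, or making it configurable, is the kind of edit implied.

/* Paraphrased sketch of the pattern in create_secondary_gie () in
 * deepstream_secondary_gie_bin.c; "sgie_elem" and the config field name
 * are approximations, not verbatim SDK code. */
g_object_set (G_OBJECT (sgie_elem),
    "config-file-path", GET_FILE_PATH (config->config_file_path),
    "process-mode", 2,   /* hardcoded: 2 = operate on detected objects only  */
    NULL);               /* change to 1 so the sgie processes the whole frame */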

Thank you. I am able to pass the entire frame to the secondary GIE, but the tracker works only for the primary GIE. How do I modify the tracker section code to make the DeepStream pipeline support multiple GIEs?

In the default code, the tracker sits between the pgie and the sgies; you need to move the tracker behind the sgies.
Please refer to create_common_elements in deepstream_app.c.
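
To make that concrete (my reading of the code, not verbatim SDK logic): create_common_elements builds the chain back to front. Each enabled block creates its bin, links it in front of the previous *sink_elem, and then becomes the new *sink_elem, so the block that runs earliest in the function ends up furthest downstream. Moving the tracker behind the sgies therefore amounts to moving the tracker block above the secondary-gie block in the function, roughly as follows.

/* Linking convention used by every block in create_common_elements ()
 * ("new_bin" stands for whichever bin the current block just created):
 *
 *   default block order:  sgie, dsanalytics, tracker, pgie
 *     -> data flow:       pgie -> tracker -> dsanalytics -> sgie
 *   tracker block first:  tracker, sgie, dsanalytics, pgie
 *     -> data flow:       pgie -> dsanalytics -> sgie -> tracker
 */
if (*sink_elem) {
  NVGSTDS_LINK_ELEMENT (new_bin, *sink_elem);  /* new_bin feeds previous bin */
}
*sink_elem = new_bin;                          /* next block links in front  */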

Please help me understand this function. Which section of the code is responsible for enabling the tracker for the secondary GIE?

/**
 * Function to create common elements(Primary infer, tracker, secondary infer)
 * of the pipeline. These components operate on muxed data from all the
 * streams. So they are independent of number of streams in the pipeline.
 */
static gboolean
create_common_elements (NvDsConfig * config, NvDsPipeline * pipeline,
    GstElement ** sink_elem, GstElement ** src_elem,
    bbox_generated_callback bbox_generated_post_analytics_cb)
{
  gboolean ret = FALSE;
  *sink_elem = *src_elem = NULL;

  if (config->primary_gie_config.enable) {
    if (config->num_secondary_gie_sub_bins > 0) {
      /** if using nvmultiurisrcbin, override batch-size config for sgie */
      if (config->use_nvmultiurisrcbin) {
        for (guint i = 0; i < config->num_secondary_gie_sub_bins; i++) {
          config->secondary_gie_sub_bin_config[i].batch_size =
              config->sgie_batch_size;
        }
      }
      if (!create_secondary_gie_bin (config->num_secondary_gie_sub_bins,
              config->primary_gie_config.unique_id,
              config->secondary_gie_sub_bin_config,
              &pipeline->common_elements.secondary_gie_bin)) {
        goto done;
      }
      gst_bin_add (GST_BIN (pipeline->pipeline),
          pipeline->common_elements.secondary_gie_bin.bin);
      if (!*src_elem) {
        *src_elem = pipeline->common_elements.secondary_gie_bin.bin;
      }
      if (*sink_elem) {
        NVGSTDS_LINK_ELEMENT (pipeline->common_elements.secondary_gie_bin.bin,
            *sink_elem);
      }
      *sink_elem = pipeline->common_elements.secondary_gie_bin.bin;
    }
  }

  if (config->primary_gie_config.enable) {
    if (config->num_secondary_preprocess_sub_bins > 0) {
      if (!create_secondary_preprocess_bin (config->
              num_secondary_preprocess_sub_bins,
              config->primary_gie_config.unique_id,
              config->secondary_preprocess_sub_bin_config,
              &pipeline->common_elements.secondary_preprocess_bin)) {
        g_print ("creating secondary_preprocess bin failed\n");
        goto done;
      }
      gst_bin_add (GST_BIN (pipeline->pipeline),
          pipeline->common_elements.secondary_preprocess_bin.bin);

      if (!*src_elem) {
        *src_elem = pipeline->common_elements.secondary_preprocess_bin.bin;
      }
      if (*sink_elem) {
        NVGSTDS_LINK_ELEMENT (pipeline->common_elements.
            secondary_preprocess_bin.bin, *sink_elem);
      }

      *sink_elem = pipeline->common_elements.secondary_preprocess_bin.bin;
    }
  }

  if (config->dsanalytics_config.enable) {
    if (!create_dsanalytics_bin (&config->dsanalytics_config,
            &pipeline->common_elements.dsanalytics_bin)) {
      g_print ("creating dsanalytics bin failed\n");
      goto done;
    }
    gst_bin_add (GST_BIN (pipeline->pipeline),
        pipeline->common_elements.dsanalytics_bin.bin);

    if (!*src_elem) {
      *src_elem = pipeline->common_elements.dsanalytics_bin.bin;
    }
    if (*sink_elem) {
      NVGSTDS_LINK_ELEMENT (pipeline->common_elements.dsanalytics_bin.bin,
          *sink_elem);
    }
    *sink_elem = pipeline->common_elements.dsanalytics_bin.bin;
  }

  if (config->tracker_config.enable) {
    if (!create_tracking_bin (&config->tracker_config,
            &pipeline->common_elements.tracker_bin)) {
      g_print ("creating tracker bin failed\n");
      goto done;
    }
    gst_bin_add (GST_BIN (pipeline->pipeline),
        pipeline->common_elements.tracker_bin.bin);
    if (!*src_elem) {
      *src_elem = pipeline->common_elements.tracker_bin.bin;
    }
    if (*sink_elem) {
      NVGSTDS_LINK_ELEMENT (pipeline->common_elements.tracker_bin.bin,
          *sink_elem);
    }
    *sink_elem = pipeline->common_elements.tracker_bin.bin;
  }

  if (config->primary_gie_config.enable) {
    /** if using nvmultiurisrcbin, override batch-size config for pgie */
    if (config->use_nvmultiurisrcbin) {
      config->primary_gie_config.batch_size = config->max_batch_size;
    }
    if (!create_primary_gie_bin (&config->primary_gie_config,
            &pipeline->common_elements.primary_gie_bin)) {
      goto done;
    }
    gst_bin_add (GST_BIN (pipeline->pipeline),
        pipeline->common_elements.primary_gie_bin.bin);
    if (*sink_elem) {
      NVGSTDS_LINK_ELEMENT (pipeline->common_elements.primary_gie_bin.bin,
          *sink_elem);
    }
    *sink_elem = pipeline->common_elements.primary_gie_bin.bin;
    if (!*src_elem) {
      *src_elem = pipeline->common_elements.primary_gie_bin.bin;
    }
    NVGSTDS_ELEM_ADD_PROBE (pipeline->
        common_elements.primary_bbox_buffer_probe_id,
        pipeline->common_elements.primary_gie_bin.bin, "src",
        gie_primary_processing_done_buf_prob, GST_PAD_PROBE_TYPE_BUFFER,
        pipeline->common_elements.appCtx);
  }

  if (config->preprocess_config.enable) {
    if (!create_preprocess_bin (&config->preprocess_config,
            &pipeline->common_elements.preprocess_bin)) {
      g_print ("creating preprocess bin failed\n");
      goto done;
    }
    gst_bin_add (GST_BIN (pipeline->pipeline),
        pipeline->common_elements.preprocess_bin.bin);

    if (!*src_elem) {
      *src_elem = pipeline->common_elements.preprocess_bin.bin;
    }
    if (*sink_elem) {
      NVGSTDS_LINK_ELEMENT (pipeline->common_elements.preprocess_bin.bin,
          *sink_elem);
    }

    *sink_elem = pipeline->common_elements.preprocess_bin.bin;
  }

  if (*src_elem) {
    NVGSTDS_ELEM_ADD_PROBE (pipeline->
        common_elements.primary_bbox_buffer_probe_id, *src_elem, "src",
        analytics_done_buf_prob, GST_PAD_PROBE_TYPE_BUFFER,
        &pipeline->common_elements);

    /* Add common message converter */
    if (config->msg_conv_config.enable) {
      NvDsSinkMsgConvBrokerConfig *convConfig = &config->msg_conv_config;
      pipeline->common_elements.msg_conv =
          gst_element_factory_make (NVDS_ELEM_MSG_CONV, "common_msg_conv");
      if (!pipeline->common_elements.msg_conv) {
        NVGSTDS_ERR_MSG_V ("Failed to create element 'common_msg_conv'");
        goto done;
      }

      g_object_set (G_OBJECT (pipeline->common_elements.msg_conv),
          "config", convConfig->config_file_path,
          "msg2p-lib",
          (convConfig->conv_msg2p_lib ? convConfig->conv_msg2p_lib : "null"),
          "payload-type", convConfig->conv_payload_type, "comp-id",
          convConfig->conv_comp_id, "debug-payload-dir",
          convConfig->debug_payload_dir, "multiple-payloads",
          convConfig->multiple_payloads, NULL);

      gst_bin_add (GST_BIN (pipeline->pipeline),
          pipeline->common_elements.msg_conv);

      NVGSTDS_LINK_ELEMENT (*src_elem, pipeline->common_elements.msg_conv);
      *src_elem = pipeline->common_elements.msg_conv;
    }
    pipeline->common_elements.tee =
        gst_element_factory_make (NVDS_ELEM_TEE, "common_analytics_tee");
    if (!pipeline->common_elements.tee) {
      NVGSTDS_ERR_MSG_V ("Failed to create element 'common_analytics_tee'");
      goto done;
    }

    gst_bin_add (GST_BIN (pipeline->pipeline), pipeline->common_elements.tee);

    NVGSTDS_LINK_ELEMENT (*src_elem, pipeline->common_elements.tee);
    *src_elem = pipeline->common_elements.tee;
  }

  ret = TRUE;
done:
  return ret;
}
I assume this is the function you mentioned. Can you please help me understand this pipeline and how to enable the tracker for the secondary GIE?
