Memory leak using new nvstreammux

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU): dGPU
• DeepStream Version: 6.1.1
• TensorRT Version: 8.4.1
• NVIDIA GPU Driver Version (valid for GPU only): 515.65.01
• Issue Type (questions, new requirements, bugs): questions
• How to reproduce the issue? (This is for bugs. Include which sample app is used, the configuration file contents, the command line used, and other details for reproducing.)

I ran the deepstream-test5-app sample with the new nvstreammux for one day and measured the RSS of the process with ps aux.
Looking at the results, there appears to be a memory leak of about 528 KB.
How can I fix this memory leak?
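
For reference, this kind of periodic RSS sampling can be scripted with a small helper along the following lines (a sketch only; the PID and interval are command-line arguments, and VmRSS is read from /proc/<pid>/status):

/* rss_sampler.c - print a process's VmRSS at a fixed interval.
 * Usage: ./rss_sampler <pid> <interval_seconds>   (stop with Ctrl-C) */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main (int argc, char **argv)
{
  if (argc != 3) {
    fprintf (stderr, "usage: %s <pid> <interval_seconds>\n", argv[0]);
    return 1;
  }

  char path[64];
  snprintf (path, sizeof (path), "/proc/%s/status", argv[1]);
  unsigned interval = (unsigned) atoi (argv[2]);

  for (;;) {
    FILE *fp = fopen (path, "r");
    if (!fp) {
      perror ("fopen");              /* process exited or bad PID */
      return 1;
    }
    char line[256];
    while (fgets (line, sizeof (line), fp)) {
      if (strncmp (line, "VmRSS:", 6) == 0) {
        printf ("%s", line);         /* e.g. "VmRSS:   123456 kB" */
        fflush (stdout);
        break;
      }
    }
    fclose (fp);
    sleep (interval);
  }
  return 0;
}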

I ran the following commands in the Docker container.

command

cd /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test5/configs/
# change the configuration file and RTSP streaming
export USE_NEW_NVSTREAMMUX=yes
/opt/nvidia/deepstream/deepstream/bin/deepstream-test5-app -c test5_config_file_src_infer.txt

I used this config file:

[application]
enable-perf-measurement=1
perf-measurement-interval-sec=5
#gie-kitti-output-dir=streamscl

[tiled-display]
enable=0 # change to disable
rows=2
columns=2
width=1280
height=720
gpu-id=0
#(0): nvbuf-mem-default - Default memory allocated, specific to particular platform
#(1): nvbuf-mem-cuda-pinned - Allocate Pinned/Host cuda memory, applicable for Tesla
#(2): nvbuf-mem-cuda-device - Allocate Device cuda memory, applicable for Tesla
#(3): nvbuf-mem-cuda-unified - Allocate Unified cuda memory, applicable for Tesla
#(4): nvbuf-mem-surface-array - Allocate Surface Array memory, applicable for Jetson
nvbuf-memory-type=0


[source0]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=4 # change to RTSP type
uri=rtsp://<IP address>:554/test.mpeg4 # change to RTSP URI
num-sources=2
gpu-id=0
nvbuf-memory-type=0

[source1]
enable=0 # change to disable
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=3
uri=file://../../../../../samples/streams/sample_1080p_h264.mp4
num-sources=2
gpu-id=0
nvbuf-memory-type=0

[sink0]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File
type=1 # change to fakesink type
sync=0
source-id=0
gpu-id=0
nvbuf-memory-type=0

[sink1]
enable=0 # change to disable
#Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvdrmvideosink 6=MsgConvBroker
type=6
msg-conv-config=dstest5_msgconv_sample_config.txt
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM   - Custom schema payload
msg-conv-payload-type=0
msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so
#Provide your msg-broker-conn-str here
msg-broker-conn-str=<host>;<port>;<topic>
topic=<topic>
#Optional:
#msg-broker-config=../../deepstream-test4/cfg_kafka.txt

[sink2]
enable=0
type=3
#1=mp4 2=mkv
container=1
#1=h264 2=h265 3=mpeg4
## only SW mpeg4 is supported right now.
codec=3
sync=1
bitrate=2000000
output-file=out.mp4
source-id=0

# sink type = 6 by default creates msg converter + broker.
# To use multiple brokers use this group for converter and use
# sink type = 6 with disable-msgconv = 1
[message-converter]
enable=0
msg-conv-config=dstest5_msgconv_sample_config.txt
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM   - Custom schema payload
msg-conv-payload-type=0
# Name of library having custom implementation.
#msg-conv-msg2p-lib=<val>
# Id of component in case only selected message to parse.
#msg-conv-comp-id=<val>

# Configure this group to enable cloud message consumer.
[message-consumer0]
enable=0
proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so
conn-str=<host>;<port>
config-file=<broker config file e.g. cfg_kafka.txt>
subscribe-topic-list=<topic1>;<topic2>;<topicN>
# Use this option if message has sensor name as id instead of index (0,1,2 etc.).
#sensor-list-file=dstest5_msgconv_sample_config.txt

[osd]
enable=1
gpu-id=0
border-width=1
text-size=15
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Arial
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
nvbuf-memory-type=0

[streammux]
gpu-id=0
##Boolean property to inform muxer that sources are live
live-source=0
batch-size=4
##time out in usec, to wait after the first buffer is available
##to push the batch even if the complete batch is not formed
batched-push-timeout=40000
## Set muxer output width and height
width=1920
height=1080
##Enable to maintain aspect ratio wrt source, and allow black borders, works
##along with width, height properties
enable-padding=0
nvbuf-memory-type=0
## If set to TRUE, system timestamp will be attached as ntp timestamp
## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached
# attach-sys-ts-as-ntp=1

[primary-gie]
enable=1
gpu-id=0
batch-size=1 # change to 1
## 0=FP32, 1=INT8, 2=FP16 mode
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;1;1;1
bbox-border-color3=0;1;0;1
nvbuf-memory-type=0
interval=0
gie-unique-id=1
model-engine-file=../../../../../samples/models/Primary_Detector/resnet10.caffemodel_b4_gpu0_int8.engine
labelfile-path=../../../../../samples/models/Primary_Detector/labels.txt
config-file=../../../../../samples/configs/deepstream-app/config_infer_primary.txt
#infer-raw-output-dir=../../../../../samples/primary_detector_raw_output/

[tracker]
enable=1
# For NvDCF and DeepSORT tracker, tracker-width and tracker-height must be a multiple of 32, respectively
tracker-width=640
tracker-height=384
ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
# ll-config-file required to set different tracker types
# ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_IOU.yml
ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml
# ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_accuracy.yml
# ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_DeepSORT.yml
gpu-id=0
enable-batch-process=1
enable-past-frame=1
display-tracking-id=1

[tests]
file-loop=0
  1. Can you use the latest DeepStream container nvcr.io/nvidia/deepstream:7.0-samples-multiarch? Thanks.
  2. If you still find a memory leak, please refer to this topic. Can you provide more logs from nvmemstat? It can record the memory usage at intervals. Can you also provide valgrind logs using this method? Valgrind can give memory details. BTW, the new nvstreammux is open source; please refer to /opt/nvidia/deepstream/deepstream/sources/gst-plugins/gst-nvmultistream2 if interested.

I referred to /opt/nvidia/deepstream/deepstream/sources/gst-plugins/gst-nvmultistream2 and modified the parts of the source code that I believe are causing the memory leak.
However, memory still leaks even with the following modifications.

  1. Is my fix below correct?
  2. Where else should I fix?
diff --git a/gstnvstreamdemux.cpp b/gstnvstreamdemux.cpp
index e296fbc2..eda9cdd2 100644
--- a/gstnvstreamdemux.cpp
+++ b/gstnvstreamdemux.cpp
@@ -596,12 +596,15 @@ set_src_pad_caps (GstNvStreamDemux * nvstreamdemux, gint index, gint width_val,
     GstEvent *event;
     GstCaps *other_caps = NULL;
     GstCapsFeatures *features = gst_caps_features_from_string ("memory:NVMM");
-
+    printf("gst_caps_features_from_string (\"memory:NVMM\")\n"); /* add */
     keys = keys->next;
 
     LOGD("pad=%p\n", pad);
     if (!pad)
+    {
+      gst_caps_features_free(features); /* add */
       continue;
+    }
 
     /** query the muxer for this stream's caps and use that if available */
     if(GST_ELEMENT_CAST (nvstreamdemux)->sinkpads) {
@@ -716,6 +719,7 @@ set_src_pad_caps (GstNvStreamDemux * nvstreamdemux, gint index, gint width_val,
 
       }
     }
+    gst_caps_features_free(features); /* add */
   }
 
   return ret;
@@ -765,6 +769,7 @@ gst_nvstreamdemux_sink_event (GstPad * pad, GstObject * parent,
   if (GST_EVENT_TYPE (event) == GST_NVEVENT_UPDATE_CAPS) {
 	  const GstStructure *const str = gst_event_get_structure(event);
 	  LOGD("Got update-caps event\n");
+	  printf("Got update-caps event\n"); /* add print */
 	  GstCaps *new_caps;
 	  GstStructure *new_caps_str;
 	  guint stream_index;
@@ -794,6 +799,10 @@ gst_nvstreamdemux_sink_event (GstPad * pad, GstObject * parent,
 			  fr);
 	  ret = set_src_pad_caps (nvstreamdemux, stream_index, width_val, height_val, stream_id);
 	  g_mutex_unlock (&nvstreamdemux->ctx_lock);
+
+    printf("KEY : %u\tSIZE : %zu\n", stream_index, sizeof (GValue)); /* add print */
+
+
 	  //GST_OBJECT_UNLOCK (nvstreamdemux);
 
 	  return ret;
@@ -846,20 +855,25 @@ gst_nvstreamdemux_sink_event (GstPad * pad, GstObject * parent,
   }
 
   if (GST_EVENT_TYPE (event) == GST_NVEVENT_STREAM_SEGMENT) {
+    printf("got STREAM_SEGMENT in demux\n"); /* add */
     GstSegment *segment;
     GstPad *src_pad = NULL;
     guint source_id = 0;
+    gboolean ret = FALSE; /* add */
     gst_nvevent_parse_stream_segment (event, &source_id,
         &segment);
     src_pad =
       GST_PAD (g_hash_table_lookup (nvstreamdemux->pad_indexes,
             source_id + (char *)NULL));
     if (!src_pad) {
+      gst_segment_free(segment); /* add */
       return TRUE;
     }
 
     LOGD("sending segment event on pad %d\n", source_id);
-    return gst_pad_push_event (src_pad, gst_event_new_segment (segment));
+    ret = gst_pad_push_event (src_pad, gst_event_new_segment (segment));
+    gst_segment_free(segment); /* add */
+    return ret;
   }
 
   if ((GST_EVENT_TYPE (event) == GST_NVEVENT_PAD_ADDED) ||
@@ -870,6 +884,7 @@ gst_nvstreamdemux_sink_event (GstPad * pad, GstObject * parent,
 
   if (GST_EVENT_TYPE (event) == GST_NVEVENT_STREAM_START) {
     LOGD("got STREAM_START in demux\n");
+    printf("got STREAM_START in demux\n"); /* add */
     GstPad *src_pad = NULL;
     gchar* stream_id = NULL;
     guint source_id = 0;
@@ -880,11 +895,13 @@ gst_nvstreamdemux_sink_event (GstPad * pad, GstObject * parent,
       GST_PAD (g_hash_table_lookup (nvstreamdemux->pad_indexes,
             source_id + (char *)NULL));
     if (!src_pad) {
+      g_free(stream_id); /* add */
       return TRUE;
     }
 
     LOGD("sending stream-start event on pad %d stream_id=%s\n", source_id, stream_id);
     send_stream_start_if_not_already_sent(nvstreamdemux, source_id, stream_id, src_pad);
+    g_free(stream_id); /* add */
     return TRUE;
   }
 
@@ -958,6 +975,13 @@ gst_nvstreamdemux_2_class_init (GstNvStreamDemuxClass * klass)
       GST_DEBUG_FUNCPTR (gst_nvstreamdemux_release_pad);
 }
 
+void destory_func(gpointer val) /* add */
+{
+  printf("Call destroy_func\n");
+  g_value_unset((GValue *)val);
+  g_free(val);
+}
+
 static void
 gst_nvstreamdemux_2_init (GstNvStreamDemux * nvstreamdemux)
 {
@@ -977,7 +1001,7 @@ gst_nvstreamdemux_2_init (GstNvStreamDemux * nvstreamdemux)
   gst_element_add_pad (GST_ELEMENT (nvstreamdemux), nvstreamdemux->sinkpad);
 
   nvstreamdemux->pad_indexes = g_hash_table_new (NULL, NULL);
-  nvstreamdemux->pad_framerates = g_hash_table_new (NULL, NULL);
+  nvstreamdemux->pad_framerates = g_hash_table_new_full (NULL, NULL, NULL, destory_func);
   nvstreamdemux->pad_caps_is_raw = g_hash_table_new (NULL, NULL);
   nvstreamdemux->pad_stream_start_sent = g_hash_table_new (NULL, NULL);
   nvstreamdemux->eos_flag = g_hash_table_new (NULL, NULL);
diff --git a/gstnvstreammux.cpp b/gstnvstreammux.cpp
index 599d5ea4..02853296 100644
--- a/gstnvstreammux.cpp
+++ b/gstnvstreammux.cpp
@@ -618,7 +618,7 @@ gst_nvstreammux_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
     g_free(name);
     return FALSE;
   }
-
+  g_free(name); /* add */
   LOGD("DEBUGME\n");
 
   if (gst_nvquery_is_batch_size (query)) {
@@ -667,6 +667,7 @@ gst_nvstreammux_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
         LOGD("ret_caps from src_pad after intersect is [%s]\n", gst_caps_to_string(ret_caps));
         if(!ret_caps || gst_caps_is_empty (ret_caps) || gst_caps_is_any (ret_caps)) {
           LOGD("src not returning caps yet\n");
+          gst_caps_unref (ret_caps); /* add */
           /** try querying caps on src pad with filter and check */
           ret_caps = gst_pad_peer_query_caps (mux->srcpad, filter);
           /** remove memory:NVMM feature as this is not supported upstream */
@@ -677,6 +678,7 @@ gst_nvstreammux_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
           LOGD("ret_caps from src_pad with filter after intersect is [%s]\n", gst_caps_to_string(ret_caps));
           LOGD("ret_caps=[%s]\n", gst_caps_to_string(ret_caps));
           if(!ret_caps || gst_caps_is_empty (ret_caps) || gst_caps_is_any (ret_caps)) {
+            gst_caps_unref (ret_caps); /* add */
             ret_caps = gst_pad_get_pad_template_caps (pad);
           }
         }
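
For reference, here is a minimal standalone GLib program (not the plugin code; the string value is just a placeholder) showing the GDestroyNotify pattern that the g_hash_table_new_full() change above relies on: values stored in the table are released automatically when an entry is replaced or removed, or when the table is destroyed.

/* destroy_notify_sketch.c
 * Build: gcc destroy_notify_sketch.c $(pkg-config --cflags --libs gobject-2.0) */
#include <glib-object.h>

static void
value_destroy (gpointer val)
{
  g_value_unset ((GValue *) val);   /* releases whatever the GValue holds */
  g_free (val);                     /* frees the GValue container itself */
}

int
main (void)
{
  /* NULL hash/equal funcs => direct pointer hashing, as in the plugin;
   * the keys are bare integers, so no key destroy function is needed. */
  GHashTable *table = g_hash_table_new_full (NULL, NULL, NULL, value_destroy);

  GValue *fps = g_new0 (GValue, 1);
  g_value_init (fps, G_TYPE_STRING);
  g_value_set_string (fps, "30/1");              /* placeholder value */
  g_hash_table_insert (table, GUINT_TO_POINTER (0), fps);

  /* value_destroy() is now called for every remaining entry: */
  g_hash_table_destroy (table);
  return 0;
}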

Thanks for sharing! Are you testing on DS 7.0? Can you please highlight the fix? Please refer to my last comment: if you still find a memory leak in the new streammux, you can get a valgrind log and then analyze the "definitely lost" entries that point into the new streammux code.

I still see a memory leak even with DS 7.0, so I used valgrind to identify the leaking parts related to the new nvstreammux.
I have attached the log I got from valgrind.
valgrind.log (241.9 KB)

The following diff is my attempt to fix the memory leak.
Please check whether my fix is correct; if it is not, please point out what is wrong.

I ran valgrind inside the Docker container.

diff

diff --git a/gstnvstreamdemux.cpp b/gstnvstreamdemux.cpp
index e296fbc2..3dc60c2c 100644
--- a/gstnvstreamdemux.cpp
+++ b/gstnvstreamdemux.cpp
@@ -597,11 +597,15 @@ set_src_pad_caps (GstNvStreamDemux * nvstreamdemux, gint index, gint width_val,
     GstCaps *other_caps = NULL;
     GstCapsFeatures *features = gst_caps_features_from_string ("memory:NVMM");
 
+    printf("gst_caps_features_from_string (\"memory:NVMM\")\n"); /* add */
     keys = keys->next;
 
     LOGD("pad=%p\n", pad);
     if (!pad)
+    {
+      gst_caps_features_free(features); /* add */
       continue;
+    }
 
     /** query the muxer for this stream's caps and use that if available */
     if(GST_ELEMENT_CAST (nvstreamdemux)->sinkpads) {
@@ -716,6 +720,7 @@ set_src_pad_caps (GstNvStreamDemux * nvstreamdemux, gint index, gint width_val,
 
       }
     }
+    gst_caps_features_free(features); /* add */
   }
 
   return ret;
@@ -765,6 +770,7 @@ gst_nvstreamdemux_sink_event (GstPad * pad, GstObject * parent,
   if (GST_EVENT_TYPE (event) == GST_NVEVENT_UPDATE_CAPS) {
 	  const GstStructure *const str = gst_event_get_structure(event);
 	  LOGD("Got update-caps event\n");
+	  printf("Got update-caps event\n"); /* add print */
 	  GstCaps *new_caps;
 	  GstStructure *new_caps_str;
 	  guint stream_index;
@@ -794,8 +800,13 @@ gst_nvstreamdemux_sink_event (GstPad * pad, GstObject * parent,
 			  fr);
 	  ret = set_src_pad_caps (nvstreamdemux, stream_index, width_val, height_val, stream_id);
 	  g_mutex_unlock (&nvstreamdemux->ctx_lock);
+
+    printf("KEY : %u\tSIZE : %zu\n", stream_index, sizeof (GValue)); /* add print */
+
+
 	  //GST_OBJECT_UNLOCK (nvstreamdemux);
 
+    gst_event_unref(event); /* add */
 	  return ret;
   }
 
@@ -846,20 +857,27 @@ gst_nvstreamdemux_sink_event (GstPad * pad, GstObject * parent,
   }
 
   if (GST_EVENT_TYPE (event) == GST_NVEVENT_STREAM_SEGMENT) {
+    printf("got STREAM_SEGMENT in demux\n"); /* add */
     GstSegment *segment;
     GstPad *src_pad = NULL;
     guint source_id = 0;
+    gboolean ret = FALSE; /* add */
     gst_nvevent_parse_stream_segment (event, &source_id,
         &segment);
     src_pad =
       GST_PAD (g_hash_table_lookup (nvstreamdemux->pad_indexes,
             source_id + (char *)NULL));
     if (!src_pad) {
+      gst_segment_free(segment); /* add */
+      gst_event_unref(event); /* add */
       return TRUE;
     }
 
     LOGD("sending segment event on pad %d\n", source_id);
-    return gst_pad_push_event (src_pad, gst_event_new_segment (segment));
+    ret = gst_pad_push_event (src_pad, gst_event_new_segment (segment));
+    gst_segment_free(segment); /* add */
+    gst_event_unref(event); /* add */
+    return ret;
   }
 
   if ((GST_EVENT_TYPE (event) == GST_NVEVENT_PAD_ADDED) ||
@@ -870,6 +888,7 @@ gst_nvstreamdemux_sink_event (GstPad * pad, GstObject * parent,
 
   if (GST_EVENT_TYPE (event) == GST_NVEVENT_STREAM_START) {
     LOGD("got STREAM_START in demux\n");
+    printf("got STREAM_START in demux\n"); /* add */
     GstPad *src_pad = NULL;
     gchar* stream_id = NULL;
     guint source_id = 0;
@@ -880,11 +899,13 @@ gst_nvstreamdemux_sink_event (GstPad * pad, GstObject * parent,
       GST_PAD (g_hash_table_lookup (nvstreamdemux->pad_indexes,
             source_id + (char *)NULL));
     if (!src_pad) {
+      g_free(stream_id); /* add */
       return TRUE;
     }
 
     LOGD("sending stream-start event on pad %d stream_id=%s\n", source_id, stream_id);
     send_stream_start_if_not_already_sent(nvstreamdemux, source_id, stream_id, src_pad);
+    g_free(stream_id); /* add */
     return TRUE;
   }
 
@@ -958,6 +979,13 @@ gst_nvstreamdemux_2_class_init (GstNvStreamDemuxClass * klass)
       GST_DEBUG_FUNCPTR (gst_nvstreamdemux_release_pad);
 }
 
+void destory_func(gpointer val) /* add */
+{
+  printf("Call destroy_func\n");
+  g_value_unset((GValue *)val);
+  g_free(val);
+}
+
 static void
 gst_nvstreamdemux_2_init (GstNvStreamDemux * nvstreamdemux)
 {
@@ -977,7 +1005,7 @@ gst_nvstreamdemux_2_init (GstNvStreamDemux * nvstreamdemux)
   gst_element_add_pad (GST_ELEMENT (nvstreamdemux), nvstreamdemux->sinkpad);
 
   nvstreamdemux->pad_indexes = g_hash_table_new (NULL, NULL);
-  nvstreamdemux->pad_framerates = g_hash_table_new (NULL, NULL);
+  nvstreamdemux->pad_framerates = g_hash_table_new_full (NULL, NULL, NULL, destory_func); /* add */
   nvstreamdemux->pad_caps_is_raw = g_hash_table_new (NULL, NULL);
   nvstreamdemux->pad_stream_start_sent = g_hash_table_new (NULL, NULL);
   nvstreamdemux->eos_flag = g_hash_table_new (NULL, NULL);
diff --git a/gstnvstreammux.cpp b/gstnvstreammux.cpp
index 599d5ea4..906c44d6 100644
--- a/gstnvstreammux.cpp
+++ b/gstnvstreammux.cpp
@@ -618,7 +618,7 @@ gst_nvstreammux_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
     g_free(name);
     return FALSE;
   }
-
+  g_free(name); /* add */
   LOGD("DEBUGME\n");
 
   if (gst_nvquery_is_batch_size (query)) {
@@ -1054,8 +1054,9 @@ static bool handle_caps(unsigned int pad_id, GstPad * pad, GstObject * parent, G
         gst_caps_unref (src_caps);
         gst_caps_unref (new_caps);
       }
-
-      new_event = gst_nvevent_new_update_caps (pad_id, width_val, height_val, caps_str, gst_pad_get_stream_id(pad), 0);
+      gchar *id = gst_pad_get_stream_id(pad); /* add */
+      new_event = gst_nvevent_new_update_caps (pad_id, width_val, height_val, caps_str, id, 0);
+      g_free(id); /* add */
       event_ret = gst_pad_push_event (mux->srcpad, new_event);
       if(!event_ret) {
          LOGD("failed to set updated caps [%s] on source pad\n", gst_caps_to_string(caps));
@@ -1231,6 +1232,7 @@ gst_nvstreammux_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
          default:
            break;
         }
+        gst_event_unref(ev); /* add */
     }
     LOGD("event pushed to src pad [%s]\n", GST_EVENT_TYPE_NAME(event));
     ret = gst_pad_push_event (mux->srcpad, event);
@@ -1811,6 +1813,13 @@ gst_nvstreammux_2_class_init (GstNvStreamMuxClass * klass)
   gstelement_class->change_state =
       GST_DEBUG_FUNCPTR (gst_nvstreammux_change_state);
 }
+
+void destory_func2(gpointer val) /* add */
+{
+  printf("Call destroy_func nxtreammux\n");
+  gst_caps_unref((GstCaps *)val);
+}
+
 static void
 gst_nvstreammux_2_init (GstNvStreamMux * mux)
 {
@@ -1851,7 +1860,7 @@ gst_nvstreammux_2_init (GstNvStreamMux * mux)
   mux->sys_ts = DEFAULT_ATTACH_SYS_TIME_STAMP;
   mux->ntp_calc_mode = GST_NVDS_NTP_CALC_MODE_SYSTEM_TIME;
   mux->pad_task_created = FALSE;
-  mux->sink_pad_caps = g_hash_table_new (NULL, NULL);
+  mux->sink_pad_caps = g_hash_table_new_full (NULL, NULL, NULL, destory_func2); /* add */
   mux->sync_inputs = FALSE;
   mux->max_latency = 0;
   mux->has_peer_latency = FALSE;
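
As background for the g_free(id) added around the gst_nvevent_new_update_caps() call: gst_pad_get_stream_id() returns a newly allocated copy of the stream ID (transfer full), or NULL before a stream-start event, so the caller owns the string. A minimal standalone sketch (fakesrc is just a stand-in element, not part of the pipeline under test):

#include <gst/gst.h>

int
main (int argc, char **argv)
{
  gst_init (&argc, &argv);

  GstElement *src = gst_element_factory_make ("fakesrc", NULL);
  GstPad *pad = gst_element_get_static_pad (src, "src");

  /* transfer-full: the returned string must be freed by the caller */
  gchar *id = gst_pad_get_stream_id (pad);
  g_print ("stream-id: %s\n", id ? id : "(none yet)");
  g_free (id);                  /* g_free(NULL) is a no-op */

  gst_object_unref (pad);
  gst_object_unref (src);
  return 0;
}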

About the log: how long did you test, and is the memory leak severe?
I can't find streammux in valgrind.log. How do you know the memory leak comes from the new streammux?