Failed in mem copy

Hi, my program encounters this error:

/dvs/git/dirty/git-master_linux/nvutils/nvbufsurftransform/nvbufsurftransform_copy.cpp:438: => Failed in mem copy

/dvs/git/dirty/git-master_linux/nvutils/nvbufsurftransform/nvbufsurftransform_copy.cpp:438: => Failed in mem copy

0:17:00.065758943 3512542 0xffff180160c0 ERROR nvinfer gstnvinfer.cpp:1267:get_converted_buffer: cudaMemset2DAsync failed with error cudaErrorIllegalAddress while converting buffer
0:17:00.065853412 3512542 0xffff180160c0 WARN nvinfer gstnvinfer.cpp:1576:gst_nvinfer_process_full_frame: error: Buffer conversion failed
GStreamer 错误: gst-stream-error-quark: Buffer conversion failed (1), /dvs/git/dirty/git-master_linux/deepstream/sdk/src/gst-plugins/gst-nvinfer/gstnvinfer.cpp(1576): gst_nvinfer_process_full_frame (): /GstPipeline:pipeline0/GstNvInfer:primary-inference-0
0:17:00.160870942 3512542 0xffff180160c0 ERROR nvinfer gstnvinfer.cpp:1267:get_converted_buffer: cudaMemset2DAsync failed with error cudaErrorIllegalAddress while converting buffer
0:17:00.161341686 3512542 0xffff180160c0 WARN nvinfer gstnvinfer.cpp:1576:gst_nvinfer_process_full_frame: error: Buffer conversion failed

CUDA:12.6.68
cuDNN:9.3.075
TensorRT:10.3.0.30

Can you please provide the DeepStream version and steps to reproduce this issue? Thank you.

Hello, my DeepStream version is 7.1.0, and here is my DeepStream Python pipeline code:

def _create_pipeline_elements(self):
    """Build and link the single-stream DeepStream inference pipeline.

    Topology:
        rtspsrc -> rtph264depay -> h264parse -> nvv4l2decoder -> queue
        -> nvvideoconvert -> capsfilter(NVMM/NV12) -> nvstreammux
        -> [nvinfer x N] -> nvtracker -> nvvideoconvert -> nvdsosd
        -> nvvideoconvert -> videoconvert -> appsink(BGR)

    Side effects:
        * Adds every element to ``self.pipeline``.
        * Stores ``self.rtpdepay`` (linked later in the ``pad-added``
          callback, since rtspsrc pads are created dynamically).
        * Stores ``self.pad_added_handler_id`` and fills
          ``self.model_id_map`` (unique-id -> model name / class filter).

    Raises:
        RuntimeError: if an element cannot be created or a link fails.
    """

    def make(factory_name, element_name):
        # Fail loudly if a plugin is missing instead of crashing later
        # with AttributeError on None.
        element = Gst.ElementFactory.make(factory_name, element_name)
        if element is None:
            raise RuntimeError(f"Failed to create element '{factory_name}'")
        return element

    def link(upstream, downstream):
        # Element.link() returns False on failure. Do not use `assert`
        # for this check: asserts are stripped under `python -O`.
        if not upstream.link(downstream):
            raise RuntimeError(
                f"Failed to link {upstream.get_name()} -> "
                f"{downstream.get_name()}")

    # 1. RTSP source and H.264 depay/parse/decode chain.
    source = make("rtspsrc", "rtsp-source")
    source.set_property('location', self.video_source)
    source.set_property('latency', 300)
    source.set_property('protocols', 'tcp')
    # NOTE(review): rtspsrc 'tcp-timeout' is in microseconds, so
    # 100000000 us = 100 s — the original comment said "10 s TCP
    # timeout"; confirm the intended value.
    source.set_property('tcp-timeout', 100000000)
    source.set_property("do-rtcp", True)
    source.set_property("retry", 5)
    # NOTE(review): rtspsrc 'timeout' is also in microseconds; 5 us is
    # almost certainly not intended — confirm (5 s would be 5000000).
    source.set_property("timeout", 5)
    # source.set_property('drop-on-latency', True)

    # rtspsrc exposes its src pads only after stream negotiation, so the
    # depayloader is linked inside the pad-added callback.
    self.pad_added_handler_id = source.connect("pad-added", self.on_pad_added)

    self.rtpdepay = make("rtph264depay", "rtp-h264-depay")
    h264parser = make("h264parse", "h264-parser")
    decoder = make("nvv4l2decoder", "nv-decoder")
    decoder.set_property("enable-max-performance", True)

    for elem in (source, self.rtpdepay, h264parser, decoder):
        self.pipeline.add(elem)
    link(self.rtpdepay, h264parser)
    link(h264parser, decoder)

    queue_before_mux = make("queue", "queue-before-mux")
    self.pipeline.add(queue_before_mux)
    link(decoder, queue_before_mux)

    # Pre-mux convert + caps: force NVMM NV12 at the mux resolution so
    # nvstreammux does not have to scale/convert internally.
    nvvidconv_pre_mux = make("nvvideoconvert", "pre-mux-converter")
    nvvidconv_pre_mux.set_property('compute-hw', 1)  # presumably 1 = GPU — confirm
    # nvvidconv_pre_mux.set_property('nvbuf-memory-type', 2)
    self.pipeline.add(nvvidconv_pre_mux)

    caps_str = f"video/x-raw(memory:NVMM), width={self.stream_width}, height={self.stream_height}, format=NV12"
    caps_filter = make("capsfilter", "pre-mux-capsfilter")
    caps_filter.set_property("caps", Gst.Caps.from_string(caps_str))
    self.pipeline.add(caps_filter)
    link(queue_before_mux, nvvidconv_pre_mux)
    link(nvvidconv_pre_mux, caps_filter)

    # 2. nvstreammux: batches frames (batch of 1 here) for inference.
    streammux = make("nvstreammux", "stream-muxer")
    self.pipeline.add(streammux)
    streammux.set_property('live-source', True)
    streammux.set_property('width', self.stream_width)
    streammux.set_property('height', self.stream_height)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 50000)  # microseconds

    # get_request_pad() is deprecated since GStreamer 1.20 (DeepStream
    # 7.1 ships 1.20.3); request_pad_simple() is the drop-in replacement.
    sinkpad = streammux.request_pad_simple("sink_0")
    srcpad = caps_filter.get_static_pad("src")
    if srcpad.link(sinkpad) != Gst.PadLinkReturn.OK:
        raise RuntimeError("Failed to link pre-mux capsfilter to nvstreammux sink_0")

    # 3. Chain all inference (nvinfer) elements in series.
    last_element = streammux
    for idx, m in enumerate(self.models):
        pgie = make("nvinfer", f"primary-inference-{idx}")
        pgie.set_property('config-file-path', m['config'])

        # unique-id starts at 1 so probes can attribute metadata to the
        # model that produced it.
        component_id = idx + 1
        pgie.set_property('unique-id', component_id)

        self.model_id_map[component_id] = {
            'name': m['name'],
            'class_filter': m['class_filter'],
        }
        self.pipeline.add(pgie)
        link(last_element, pgie)
        last_element = pgie

    # 4. Shared tracker, configured from the first model's INI file.
    tracker = make("nvtracker", "shared-tracker")
    tracker_config_path = self.models[0]['tracker_cfg']
    cfg = configparser.ConfigParser()
    cfg.read(tracker_config_path)
    for key, value in cfg['tracker'].items():
        # Integer-typed properties must be set as ints; everything else
        # (e.g. library paths) falls through as a string.
        try:
            tracker.set_property(key, int(value))
        except (ValueError, TypeError):
            tracker.set_property(key, value)

    self.pipeline.add(tracker)
    link(last_element, tracker)
    last_element = tracker

    # A probe on the tracker's src pad sees metadata from every model.
    tracker_src_pad = tracker.get_static_pad("src")
    tracker_src_pad.add_probe(
        Gst.PadProbeType.BUFFER,
        self._probe_all_models,
        None)

    # 5. OSD and conversion to BGR system memory for the appsink consumer.
    nvvidconv_post = make("nvvideoconvert", "convertor-post")
    nvosd = make("nvdsosd", "onscreendisplay")
    nvosd.set_property("process-mode", 0)  # presumably 0 = CPU mode — confirm
    gpu_conv_out = make("nvvideoconvert", "gpu-convert-out")
    cpu_conv_out = make("videoconvert", "cpu-convert-out")

    # appsink: hands BGR frames to Python via the "new-sample" signal.
    appsink = make("appsink", "appsink-final")
    appsink.set_property("emit-signals", True)
    appsink.set_property("sync", False)
    appsink.set_property("max-buffers", 5)
    appsink.set_property("drop", True)  # drop old frames if the consumer lags
    appsink.set_property("caps", Gst.Caps.from_string("video/x-raw,format=BGR"))
    appsink.connect("new-sample", self._on_new_sample, 0)

    final_elements = (nvvidconv_post, nvosd, gpu_conv_out, cpu_conv_out, appsink)
    for elem in final_elements:
        self.pipeline.add(elem)
    link(last_element, nvvidconv_post)
    link(nvvidconv_post, nvosd)
    link(nvosd, gpu_conv_out)
    link(gpu_conv_out, cpu_conv_out)
    link(cpu_conv_out, appsink)

Are you using DeepStream with JetPack 6.2 on a Jetson device?

Refer to this topic for a workaround.