GPU memory leak

I need to restart the movie in my program, but the cuvid decoder leaks GPU memory.

// CUDA_VERSION 4000
// NVIDIA GeForce GTX 560
// DisplayDriver: 6.14.12.7533
// OS: WindowsXP Professional
// DirectX: 9.0c

// test: repeatedly create and destroy the decoder to reproduce the leak

cuCtxPushCurrent(m_Context);
do {
    oResult = cuvidCreateDecoder(&oDecoder_, &oVideoDecodeCreateInfo_);
    if (oDecoder_)
        cuvidDestroyDecoder(oDecoder_);
    oDecoder_ = NULL;
} while (1);
cuCtxPopCurrent(NULL);
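
To make the leak measurable, a variant of the test can print the free device memory on each iteration via cuMemGetInfo (a sketch only, reusing the same context, decoder handle, and create-info as above):

// Sketch: watch free GPU memory shrink across create/destroy cycles.
cuCtxPushCurrent(m_Context);
for (int i = 0; i < 100; ++i)
{
    size_t nFree = 0, nTotal = 0;
    cuMemGetInfo(&nFree, &nTotal);   // free/total bytes on the device for this context
    printf("iteration %d: %u KB free of %u KB\n",
           i, (unsigned)(nFree / 1024), (unsigned)(nTotal / 1024));

    oResult = cuvidCreateDecoder(&oDecoder_, &oVideoDecodeCreateInfo_);
    if (oDecoder_)
        cuvidDestroyDecoder(oDecoder_);
    oDecoder_ = NULL;
}
cuCtxPopCurrent(NULL);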

oVideoDecodeCreateInfo_ initialization:

m_Context = rContext;
m_VideoCreateFlags = eCreateFlags;
switch (eCreateFlags) {
    case cudaVideoCreate_Default:     printf("Default (VP)\n"); break;
    case cudaVideoCreate_PreferCUDA:  printf("Use CUDA decoder\n"); break;
    case cudaVideoCreate_PreferDXVA:  printf("Use DXVA decoder\n"); break;
    case cudaVideoCreate_PreferCUVID: printf("Use CUVID decoder\n"); break;
    default:                          printf("Unknown value\n"); break;
}
assert(cudaVideoCodec_MPEG1  == rVideoFormat.codec ||
       cudaVideoCodec_MPEG2  == rVideoFormat.codec ||
       cudaVideoCodec_MPEG4  == rVideoFormat.codec ||
       cudaVideoCodec_VC1    == rVideoFormat.codec ||
       cudaVideoCodec_H264   == rVideoFormat.codec ||
       cudaVideoCodec_JPEG   == rVideoFormat.codec ||
       cudaVideoCodec_YUV420 == rVideoFormat.codec ||
       cudaVideoCodec_YV12   == rVideoFormat.codec ||
       cudaVideoCodec_NV12   == rVideoFormat.codec ||
       cudaVideoCodec_YUYV   == rVideoFormat.codec ||
       cudaVideoCodec_UYVY   == rVideoFormat.codec);

assert(cudaVideoChromaFormat_Monochrome == rVideoFormat.chroma_format ||
       cudaVideoChromaFormat_420        == rVideoFormat.chroma_format ||
       cudaVideoChromaFormat_422        == rVideoFormat.chroma_format ||
       cudaVideoChromaFormat_444        == rVideoFormat.chroma_format);

// Fill the decoder-create-info struct from the given video-format struct.
memset(&oVideoDecodeCreateInfo_, 0, sizeof(CUVIDDECODECREATEINFO));
// Create video decoder
oVideoDecodeCreateInfo_.CodecType           = rVideoFormat.codec;
oVideoDecodeCreateInfo_.ulWidth             = rVideoFormat.coded_width;
oVideoDecodeCreateInfo_.ulHeight            = rVideoFormat.coded_height;
oVideoDecodeCreateInfo_.ulNumDecodeSurfaces = FrameQueue::cnMaximumSize;
// Limit decode memory to 24MB (16M pixels at 4:2:0 = 24M bytes)
while (oVideoDecodeCreateInfo_.ulNumDecodeSurfaces * rVideoFormat.coded_width * rVideoFormat.coded_height > 16*1024*1024)
{
    oVideoDecodeCreateInfo_.ulNumDecodeSurfaces--;
}
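// Worked example (hypothetical 1920x1080 source): 16*1024*1024 / (1920*1080)
// caps ulNumDecodeSurfaces at 8; 8 NV12 surfaces * 1.5 bytes/pixel * 1920*1080
// is about 24 MB of decode memory, matching the comment above.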
oVideoDecodeCreateInfo_.ChromaFormat        = rVideoFormat.chroma_format;
oVideoDecodeCreateInfo_.OutputFormat        = cudaVideoSurfaceFormat_NV12;
oVideoDecodeCreateInfo_.DeinterlaceMode     = cudaVideoDeinterlaceMode_Adaptive;

// No scaling
oVideoDecodeCreateInfo_.ulTargetWidth       = oVideoDecodeCreateInfo_.ulWidth;
oVideoDecodeCreateInfo_.ulTargetHeight      = oVideoDecodeCreateInfo_.ulHeight;
oVideoDecodeCreateInfo_.ulNumOutputSurfaces = MAX_FRAME_COUNT; // We won't simultaneously map more than 8 surfaces
oVideoDecodeCreateInfo_.ulCreationFlags     = m_VideoCreateFlags;
oVideoDecodeCreateInfo_.vidLock             = m_VidCtxLock;
// create the decoder
oResult = cuvidCreateDecoder(&oDecoder_, &oVideoDecodeCreateInfo_);

Not to sound facetious, but what's your question? Are you asking where the memory leak is introduced? One thing I couldn't help noticing is that you never destroy your context, which should lead to some memory leaks. Specifically, your loop never exits, and even if it did, popping the context would only leave it floating; it wouldn't delete it. Try cuCtxDestroy instead. I don't think any memory leaks can be introduced in your oVideoDecodeCreateInfo_ initialization code (that's straight from the SDK examples, isn't it?).
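
For reference, if the context really were owned only by this one thread, the suggested teardown would look roughly like this (a sketch only: cuCtxPopCurrent merely detaches the context from the calling thread, while cuCtxDestroy actually releases it and the resources it owns):

cuCtxPushCurrent(m_Context);
// ... create, use, and destroy decoders ...
cuCtxPopCurrent(NULL);    // the context still exists, it is just no longer current here
cuCtxDestroy(m_Context);  // actually frees the context and everything allocated in it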

Yes, this is a test based on the SDK samples.

The infinite loop just shows how to reproduce the memory leak.

I can't destroy the shared context, because this is a multithreaded application and the context is used by other threads.
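
(For context, this kind of sharing is what the m_VidCtxLock in the initialization above is typically for. A minimal sketch of that setup, using member names assumed from the code above, might look like this:)

// Sketch: one CUDA context shared between the decoder and other worker threads,
// protected by a CUVID context lock (names assumed from the code above).
CUcontext      m_Context    = NULL;
CUvideoctxlock m_VidCtxLock = NULL;

cuCtxCreate(&m_Context, CU_CTX_BLOCKING_SYNC, oDevice);  // oDevice: some CUdevice
cuvidCtxLockCreate(&m_VidCtxLock, m_Context);            // passed as CUVIDDECODECREATEINFO::vidLock

// Each thread that touches the context brackets its work with the lock:
cuvidCtxLock(m_VidCtxLock, 0);
// ... cuvid / CUDA work on the shared context ...
cuvidCtxUnlock(m_VidCtxLock, 0);

// Only once every thread is done with the context:
cuvidCtxLockDestroy(m_VidCtxLock);
cuCtxDestroy(m_Context);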

I think that if I create an object (in this case the decoder), then after the object is destroyed its memory should be released.

Thank you for your reply.