Dear developers,
I am facing a memory leak in the NVIDIA OpenCL driver.
Environment:
Windows 7 64-bit
Quadro 5000
CUDA GPU Computing SDK 4.0
Driver ver. 296.70
The memory leak occurs when clReleaseMemObject() is called after clCreateBuffer().
Here is a sample program that reproduces the leak:
#include "CL/cl.h"
#include
#include
#include
int main(void)
{
cl_int ret;
cl_platform_id platforms[16];
cl_uint numPlatforms;
ret = clGetPlatformIDs(16, platforms, &numPlatforms);
assert(ret == CL_SUCCESS);
assert(numPlatforms < 16);
int index;
for (index = 0; index < (int)numPlatforms; index++) {
char name[256];
size_t retSize;
ret = clGetPlatformInfo(platforms[index], CL_PLATFORM_NAME, sizeof name, name, &retSize);
assert(ret == CL_SUCCESS);
if (strcmp(name, "NVIDIA CUDA") == 0) {
break;
}
}
cl_platform_id platform = platforms[index];
cl_device_id device;
cl_uint numDevices;
ret = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, &numDevices);
assert(ret == CL_SUCCESS);
cl_context_properties props[] = {CL_CONTEXT_PLATFORM, (cl_context_properties)platform, 0};
cl_context context = clCreateContext(props, 1, &device, NULL, NULL, &ret);
assert(ret == CL_SUCCESS);
const int COUNT_MAX = 10000000;
for (int count = 0; count < COUNT_MAX; count++) {
if ( count % 10000 == 0) printf("count=%d\n",count);
cl_mem clmemory = clCreateBuffer(context, CL_MEM_READ_WRITE, 128*128*sizeof(int), NULL, &ret);
assert(ret == CL_SUCCESS);
ret = clReleaseMemObject(clmemory);
assert(ret == CL_SUCCESS);
}
ret = clReleaseContext(context);
assert(ret == CL_SUCCESS);
return 0;
}
How can I avoid this memory leak?
Does anyone know of a workaround or an alternative approach?
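One workaround I am considering, though I have not confirmed that it avoids the leak, is to allocate the buffers once up front and reuse them, instead of calling clCreateBuffer()/clReleaseMemObject() inside the hot loop. A minimal sketch (pool size and buffer size are placeholders, error handling kept short):

/* Sketch of a buffer-reuse workaround: create the buffers once, reuse them in
 * the loop, and release them only at shutdown. */
#define POOL_SIZE 4

cl_mem pool[POOL_SIZE];
for (int i = 0; i < POOL_SIZE; i++) {
    pool[i] = clCreateBuffer(context, CL_MEM_READ_WRITE, 128*128*sizeof(int), NULL, &ret);
    assert(ret == CL_SUCCESS);
}

/* Inside the main loop, pick a buffer from the pool instead of creating a new one. */
for (int count = 0; count < COUNT_MAX; count++) {
    cl_mem clmemory = pool[count % POOL_SIZE];
    /* use clmemory with clEnqueueWriteBuffer()/kernels as before */
}

/* Release the pool once at the end. */
for (int i = 0; i < POOL_SIZE; i++) {
    ret = clReleaseMemObject(pool[i]);
    assert(ret == CL_SUCCESS);
}

This only sidesteps the create/release cycle; it does not fix the driver behavior itself, so I would still like to know whether the leak is a known driver issue.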
Regards,