I am having some trouble with OpenGL-CUDA interop.
My setup is the following:
I am using a framebuffer object for render-to-texture (this part works well), which I create like this:
void createFrameBuffer() {
    // Create the texture that backs the FBO's color attachment
    glGenTextures(1, &tex_data);
    glBindTexture(GL_TEXTURE_2D, tex_data);
    SDK_CHECK_ERROR_GL();
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F, window_w, window_h, 0, GL_RGBA, GL_FLOAT, NULL);

    // Create the framebuffer
    glGenFramebuffersEXT(1, &fbo);
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);

    // Register the texture with CUDA
    checkCudaErrors(cudaGraphicsGLRegisterImage(&res_data, tex_data, GL_TEXTURE_2D, cudaGraphicsMapFlagsReadOnly));
    SDK_CHECK_ERROR_GL();

    // Create a depth renderbuffer
    glGenRenderbuffersEXT(1, &depth_rb);
    glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depth_rb);
    glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, window_w, window_h);

    // Attach the texture as color attachment and the renderbuffer as depth attachment
    glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, tex_data, 0);
    glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depth_rb);

    // Clean up
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
    SDK_CHECK_ERROR_GL();
}
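For reference, the FBO is used for render-to-texture roughly like this each frame (a simplified sketch; drawScene() is a placeholder for my actual drawing code):

// Sketch of the per-frame render-to-texture pass (simplified;
// drawScene() stands in for the real drawing code)
void renderToTexture() {
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);   // render into the FBO texture
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    drawScene();                                     // normal GL drawing
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);     // back to the default framebuffer
}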
I do not have a kernel yet; for now I just want to get the OpenGL texture into something I can use from CUDA. Here is what I am trying for the conversion:
void processImage() {
    cudaArray *cuda_data = 0;
    checkCudaErrors(cudaGraphicsGLRegisterImage(&res_data, tex_data, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsReadOnly));

    float *dData; // Linear device buffer that should receive the texture data
    std::cout << "cudaMalloc" << std::endl;
    checkCudaErrors(cudaMalloc(&dData, window_w * window_h * 4 * sizeof(float)));

    std::cout << "Mapping Resource" << std::endl;
    checkCudaErrors(cudaGraphicsMapResources(1, &res_data));

    std::cout << "cudaGraphicsSubResourceGetMappedArray" << std::endl;
    checkCudaErrors(cudaGraphicsSubResourceGetMappedArray(&cuda_data, res_data, 0, 0));

    std::cout << "cudaMemcpy2DFromArray" << std::endl;
    // Copy from the mapped CUDA array into the linear device buffer
    checkCudaErrors(cudaMemcpy2DFromArray(dData, window_w * 4 * sizeof(float), cuda_data, 0, 0, window_w * 4 * sizeof(float), window_h, cudaMemcpyDeviceToDevice));

    std::cout << "cudaGraphicsUnmapResources" << std::endl;
    // Unmap the resource
    checkCudaErrors(cudaGraphicsUnmapResources(1, &res_data));
}
The problem is that when I run the program, CUDA reports an error:
> CUDA error at e:\workspace\master\gradientrenderer\gradientrenderer\source.cpp:175
> code=11(cudaErrorInvalidValue) "cudaMemcpy2DFromArray(dData, window_w*4*sizeof(float), cuda_data, 0, 0, window_w*4*sizeof(float), window_h, cudaMemcpyDeviceToDevice)"
Can someone tell me what I am doing wrong? This is part of a bigger project that uses a predefined kernel (which I cannot change), so I need the cudaMemcpy2DFromArray call to work so that the texture data ends up in a linear buffer the kernel can consume. A sketch of how the buffer is used downstream follows below.
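For context, once the copy works, dData would be handed to the predefined kernel roughly like this (the kernel name and signature here are placeholders, not the real ones, which I cannot post):

// Hypothetical illustration of how dData is consumed downstream; the real
// kernel is predefined elsewhere and its signature may differ.
__global__ void processKernel(float *data, int w, int h);

void runKernel(float *dData) {
    dim3 block(16, 16);
    dim3 grid((window_w + block.x - 1) / block.x,
              (window_h + block.y - 1) / block.y);
    processKernel<<<grid, block>>>(dData, window_w, window_h);
    checkCudaErrors(cudaDeviceSynchronize());
}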