Cuda Texture binding causes invalid argument

I get an "invalid argument" failure when I try to bind my texture to the cudaArray.

texture<int, 2, cudaReadModeElementType> texPyr;

// try texture usage
// allocate Cuda array in device memory
  cudaArray *cuArray;
  cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
  e = cudaMallocArray(&cuArray, &channelDesc, imageWidth, imageHeight);
  printf("Error: %s\n", cudaGetErrorString(e));
  e = cudaMemcpyToArray(cuArray, 0, 0, inputData, imageWidth * imageHeight * sizeof(int),      cudaMemcpyHostToDevice);
  printf("Error: %s\n", cudaGetErrorString(e));

// set texture parameters
   texPyr.addressMode[0] = cudaAddressModeClamp;
   texPyr.addressMode[1] = cudaAddressModeClamp;
   texPyr.filterMode = cudaFilterModePoint;
   texPyr.normalized = false;

// bind the texture
   e = cudaBindTextureToArray(&texPyr, cuArray, &channelDesc);
   printf("Error: %s\n", cudaGetErrorString(e));

Any ideas?

You might want to study any of the CUDA sample codes that use this function. You'll find differences between what you're doing and what is demonstrated there.

Thanks for your advice, but I tried another example and it doesn't work either.

//globally defined in cu-file
texture<int, 2, cudaReadModeElementType> texPyr;

// called in method prepareTexture

size_t pitch;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
e = cudaMallocPitch((void**)&d_imgPyr, &pitch, imageWidth * sizeof(int), imageHeight);
printf("Fehler: %s\n", cudaGetErrorString(e));
e = cudaMemcpy2D(d_imgPyr, pitch, inputData, imageWidth * sizeof(int), imageWidth * sizeof(int), imageHeight, cudaMemcpyHostToDevice);
printf("Fehler: %s\n", cudaGetErrorString(e));

// bind the texture
e = cudaBindTexture2D(NULL, &texPyr, d_imgPyr, &channelDesc, imageWidth, imageHeight, pitch);
printf("Fehler: %s\n", cudaGetErrorString(e));

// set texture parameters
texPyr.addressMode[0] = cudaAddressModeClamp;
texPyr.addressMode[1] = cudaAddressModeClamp;
texPyr.filterMode = cudaFilterModePoint;
texPyr.normalized = false;

It works until cudaBindTexture2D is called, and then "invalid argument" is thrown. Can anyone give me a corrected version of this? Thanks.

texture<int, 2, cudaReadModeElementType> texPyr;

cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();

float and int are not friends. Your texture is declared with element type `int`, but the channel descriptor is created for `float` — the two must match. Either create the descriptor with `cudaCreateChannelDesc<int>()`, or declare the texture as `texture<float, 2, cudaReadModeElementType>`.

The same thing is pointed out on your cross-posting:

https://stackoverflow.com/questions/52200215/cuda-texture-binding-causes-invalid-argument