I’m implementing a simple boxcar filter, mainly as an excuse to compare the speed of 2D texture memory accesses against global memory accesses.
In more detail, the .cu file is the following:
#include <stdio.h>
#include <cuda_runtime.h>
#include "cufft.h"
#include "Kernels_Test_Texture_Float.cuh"
#define BLOCK_SIZE_x 16
#define BLOCK_SIZE_y 16
/**********************/
/* TEST TEXTURE FLOAT */
/**********************/
extern "C" void Function_Test_Texture_Float(float* data, float* dev_result, int N1, int N2){
size_t pitch;
float* data_d;
cudaMallocPitch((void**)&data_d,&pitch, N1 * sizeof(float), N2);
cudaChannelFormatDesc desc = cudaCreateChannelDesc();
cudaBindTexture2D(0,&data_d_texture,data_d,&desc,N1,N2,pitch);
cudaMemcpy2D(data_d,pitch,data,sizeof(float)*N1,sizeof(float)*N1,N2,cudaMemcpyHostToDevice);
cudaMemset(dev_result,0,sizeof(float)*N1*N2);
dim3 dimBlock(BLOCK_SIZE_x,BLOCK_SIZE_y); dim3 dimGrid(N1/BLOCK_SIZE_x + (N1%BLOCK_SIZE_x == 0 ? 0:1),N2/BLOCK_SIZE_x + (N2%BLOCK_SIZE_x == 0 ? 0:1));
Kernel_Test_Texture_Float(dev_result,N1, N2);
}
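For reference, here is a minimal, untested sketch of how the same texture could instead be bound to a cudaArray (the layout the texture cache is designed around) rather than to pitched linear memory; the function name Function_Test_Texture_Float_Array is made up for illustration and is not part of my current code:
extern "C" void Function_Test_Texture_Float_Array(float* data, float* dev_result, int N1, int N2){

    // Allocate a cudaArray and copy the host data into it
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
    cudaArray* data_d_array;
    cudaMallocArray(&data_d_array, &desc, N1, N2);
    cudaMemcpyToArray(data_d_array, 0, 0, data, sizeof(float)*N1*N2, cudaMemcpyHostToDevice);

    // Bind the texture reference declared in the .cuh file to the array
    cudaBindTextureToArray(data_d_texture, data_d_array, desc);

    cudaMemset(dev_result, 0, sizeof(float)*N1*N2);

    dim3 dimBlock(BLOCK_SIZE_x, BLOCK_SIZE_y);
    dim3 dimGrid((N1 + BLOCK_SIZE_x - 1)/BLOCK_SIZE_x, (N2 + BLOCK_SIZE_y - 1)/BLOCK_SIZE_y);

    // Same kernel as above: tex2D() reads work whether the texture is backed
    // by pitched linear memory or by a cudaArray
    Kernel_Test_Texture_Float<<<dimGrid, dimBlock>>>(dev_result, N1, N2);

    cudaUnbindTexture(data_d_texture);
    cudaFreeArray(data_d_array);
}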
/**************/
/* TEST FLOAT */
/**************/
extern "C" void Function_Test_Float(float* data, float* dev_result2, int N1, int N2){
float* data_d; cudaMalloc((void**)&data_d,sizeof(float)*N1*N2);
cudaMemcpy(data_d,data,sizeof(float)*N1*N2,cudaMemcpyHostToDevice);
cudaMemset(dev_result2,0,sizeof(float)*N1*N2);
dim3 dimBlock(BLOCK_SIZE_x,BLOCK_SIZE_y); dim3 dimGrid(N1/BLOCK_SIZE_x + (N1%BLOCK_SIZE_x == 0 ? 0:1),N2/BLOCK_SIZE_x + (N2%BLOCK_SIZE_x == 0 ? 0:1));
Kernel_Test_Float(dev_result2,data_d,N1, N2);
}
The .cuh file is the following:
texture<float,2> data_d_texture;
/**************************/
/* 2D TEXTURE TEST KERNEL */
/**************************/
__global__ void Kernel_Test_Texture_Float(float* dev_result, int N1, int N2)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;

    float datum, accumulator = 0.f;
    int size_x = 5;
    int size_y = 5;

    if ((i < (N1 - size_x)) && (j < (N2 - size_y)))
    {
        // Sum the 5x5 neighbourhood through the texture cache
        for (int k = 0; k < size_x; k++)
            for (int l = 0; l < size_y; l++){
                datum = tex2D(data_d_texture, i + k, j + l);
                accumulator = accumulator + datum;
            }
        dev_result[j * blockDim.x * gridDim.x + i] = accumulator;
    }
}
/******************/
/* 2D TEST KERNEL */
/******************/
__global__ void Kernel_Test_Float(float* dev_result2, float* data_d, int N1, int N2)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;

    float accumulator = 0.f;
    int size_x = 5;
    int size_y = 5;

    if ((i < (N1 - size_x)) && (j < (N2 - size_y)))
    {
        // Sum the 5x5 neighbourhood directly from global memory
        for (int k = 0; k < size_x; k++)
            for (int l = 0; l < size_y; l++){
                accumulator = accumulator + data_d[(j + l) * blockDim.x * gridDim.x + (i + k)];
            }
        dev_result2[j * blockDim.x * gridDim.x + i] = accumulator;
    }
}
However, the global memory kernel turns out to be much faster than the texture memory kernel (94 µs vs. 615 µs; the timings come from the Visual Profiler, and the card is a GeForce GT 540M).
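For completeness, the kernels could also be timed with CUDA events instead of the profiler; a sketch of what that would look like for the global memory kernel is below (it reuses dimGrid, dimBlock, dev_result2 and data_d from Function_Test_Float, and omits error checking):
cudaEvent_t start, stop;
float elapsed_ms = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start, 0);
Kernel_Test_Float<<<dimGrid, dimBlock>>>(dev_result2, data_d, N1, N2);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);

cudaEventElapsedTime(&elapsed_ms, start, stop);   // result in milliseconds
printf("Kernel_Test_Float: %f ms\n", elapsed_ms);

cudaEventDestroy(start);
cudaEventDestroy(stop);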
Is there anything wrong in the way I’m using texture memory, or is global memory, being cached (L1 & L2), indeed faster than texture memory?
I have found the paper http://math.arizona.edu/~dongbin/Publications/GPUImplementations.pdf. If my understanding is correct (see Fig. 4), on a GTX 480, which has compute capability 2.0, global memory storage is preferable to texture memory storage. Should we conclude that, unless one is implementing (for example) linear interpolation, which should be faster when done in hardware by the texture unit, global memory storage is better than texture memory storage for general-purpose applications?
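To make the linear-interpolation point concrete, this is the kind of use case I have in mind, where the texture unit does the bilinear weighting in hardware; the kernel name and all its parameters are hypothetical, and the 0.5f offsets follow the usual texel-centre convention for unnormalized coordinates:
// Filtering must be enabled on the texture reference before binding:
//     data_d_texture.filterMode = cudaFilterModeLinear;
__global__ void Kernel_Bilinear_Interp(float* dev_output, float x0, float y0,
                                       float dx, float dy, int M1, int M2)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;

    if (i < M1 && j < M2) {
        // Sample at fractional coordinates: the texture hardware returns the
        // bilinearly interpolated value, with no explicit interpolation code.
        float x = x0 + i * dx;
        float y = y0 + j * dy;
        dev_output[j * M1 + i] = tex2D(data_d_texture, x + 0.5f, y + 0.5f);
    }
}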
Thanks in advance for any comment.