Separable Convolution and Shared Memory

Hi all,
This is one of my first posts on these forums so please do let me know if I breach and ettiquette conventions.

So I am attempting to perform separable convolution and have been looking at many examples where one loads and image patch into a “tile” in shared memory, much like the example that comes with CUDA, also found here http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/samples.html#convolutionSeparable. However, it is not clear to me how such a scheme would work when the kernel size exceeds that of the tile.

I have today tried to solve the problem, as described here http://stackoverflow.com/questions/41748504/loading-data-to-shared-memory-for-separable-convolution to no avail.

Any pointers/tips on this topic would be greatly appreciated. I would really rather not perform FFT based convolution as the massaging of the data into a suitable form may produce too much overhead. Also, I am wanting to do a separable approximation to the Bilateral filter also, which I’m not sure works with the FFT approach.

Best

The NVIDIA NPP library provides routines for separable convolution (but I don’t know how well optimized these routines are).

Thanks for the reply. However, due to my use case I’m not sure it’d work.

Basically, I wand to convolve ovver the x and y dimensions of a 3D tensor, aggregating and then normalising the convolution output over the z dimension.

The approach I am trying is shown below:-

void GaussianFilterSeparable::applyXDirection(const float *input, float *output, const float *kernel, float sd, int dim, int W, int H) {
	dim3 blockDims(16, 16, 1);
	dim3 gridDims(static_cast<int>(ceil(W / blockDims.x)), static_cast<int>(ceil(H / blockDims.y)));
	size_t sharedBufferSize = ((blockDims.y*(blockDims.x + 4*static_cast<int>(sd)))*dim);//TO-DO: remove hard coding channels.
	filterGaussianX_device << <gridDims, blockDims, sharedBufferSize * sizeof(float) >> > (input, output, kernel, sd, dim, W, H, sharedBufferSize);
}

void GaussianFilterSeparable::applyYDirection(const float *input, float *output, const float *kernel, float sd, int dim, int W, int H) {
	dim3 blockDims(16, 16, 1);
	dim3 gridDims(static_cast<int>(ceil(W / blockDims.x)), static_cast<int>(ceil(H / blockDims.y)));
	size_t sharedBufferSize = ((blockDims.y + 4 * static_cast<int>(sd))*blockDims.x)*dim;//TO-DO: remove hard coding channels.
	filterGaussianY_device << <gridDims, blockDims, sharedBufferSize * sizeof(float) >> > (input, output, kernel, sd, dim, W, H, sharedBufferSize);
}

__global__
void filterGaussianX_device(const float *input, float *output, const float *kernel, float sd, int dim, int W, int H, size_t sharedSize) {
	int x = blockIdx.x*blockDim.x + threadIdx.x;
	int y = blockIdx.y*blockDim.y + threadIdx.y;
	if (x >= W || y >= H) {
		return;
	}
	extern __shared__ float distData[];
	__syncthreads();

	//Load data into shared memory.
	int twoSD = 2 * (int)sd;
	int idxLeftGlobal = x - twoSD;
	int idxRightGlobal = x + twoSD;
	int localIdxLeft = threadIdx.y*(blockDim.x + 2 * twoSD) + (threadIdx.x - twoSD);
	int localIdxRight = threadIdx.y*(blockDim.x + 2 * twoSD) + (threadIdx.x + twoSD);
	for (int i = 0; i < dim; i++) {
		int sharedIdx = localIdxLeft*dim + i;
		if (sharedIdx >= 0 && sharedIdx < sharedSize) {
			distData[sharedIdx] = (idxLeftGlobal > 0) ? input[(y*W + idxLeftGlobal)*dim + i] : 0.0;
		}

		sharedIdx = localIdxRight*dim + i;
		if (sharedIdx >= 0 && sharedIdx < sharedSize) {
			distData[sharedIdx] = (idxRightGlobal < W) ? input[(y*W + idxRightGlobal)*dim + i] : 0.0;
		}
	}
	__syncthreads();

	//Do the convolution.
	float *channelSums = new float[dim];
	for (int i = 0; i < dim; i++) {
		channelSums[i] = 0.0;
	}

	int rad = (int)sd;
	float normaliser = 0.0;
	int sharedIdx = 0;
	for (int r = -rad; r <= rad; r++) {
		normaliser += kernel[rad - r];
		for (int i = 0; i < dim; i++) {
			sharedIdx = (threadIdx.y*(blockDim.x + 2 * twoSD) + threadIdx.x + r)*dim + i;
			if (sharedIdx >= 0 && sharedIdx < sharedSize) {
				channelSums[i] += distData[sharedIdx] * kernel[rad - r];
			}
		}
	}

	if (normaliser > 0.0 || normaliser < 0.0) {
		for (int i = 0; i < dim; i++) {
			output[(y*W + x)*dim + i] = channelSums[i] / normaliser;
		}
	}
	delete[] channelSums;
}

__global__
void filterGaussianY_device(const float *input, float *output, const float *kernel, float sd, int dim, int W, int H, size_t sharedSize) {
	int x = blockIdx.x*blockDim.x + threadIdx.x;
	int y = blockIdx.y*blockDim.y + threadIdx.y;
	if (x >= W || y >= H) {
		return;
	} 
	extern __shared__ float distData[];
	__syncthreads();

	//Load data into shared memory.
	int twoSD = 2 * (int)sd;
	int idxUpGlobal = y - twoSD;
	int idxDownGlobal = y + twoSD;
	int localIdxUp = (threadIdx.y - twoSD)*blockDim.x + threadIdx.x;
	int localIdxDown = (threadIdx.y + twoSD)*blockDim.x + threadIdx.x;
	for (int i = 0; i < dim; i++) {
		int sharedIdx = localIdxUp*dim + i;
		if (sharedIdx >= 0 && sharedIdx < sharedSize) {
			distData[sharedIdx] = (idxUpGlobal > 0) ? input[(idxUpGlobal*W + x)*dim + i] : 0.0;
		}
		
		sharedIdx = localIdxDown*dim + i;
		if (sharedIdx >= 0 && sharedIdx < sharedSize) {
			distData[sharedIdx] = (idxDownGlobal < H) ? input[(idxDownGlobal*W + x)*dim + i] : 0.0;
		}
	}
	__syncthreads();

	//Do the convolution.
	float *channelSums = new float[dim];
	for (int i = 0; i < dim; i++) {
		channelSums[i] = 0.0;
	}

	int rad = (int)sd;
	float normaliser = 0.0;
	int sharedIdx = 0;
	for (int r = -rad; r <= rad; r++) {
		normaliser += kernel[rad - r];
		for (int i = 0; i < dim; i++) {
			sharedIdx = ((threadIdx.y + r)*blockDim.x + threadIdx.x)*dim + i;

			if (sharedIdx >= 0 && sharedIdx < sharedSize) {
				channelSums[i] += distData[sharedIdx] * kernel[rad - r];
			}
		}
	}

	if (normaliser > 0.0 || normaliser < 0.0) {
		for (int i = 0; i < dim; i++) {
			output[(y*W + x)*dim + i] = channelSums[i] / normaliser;
		}
	}
	delete[] channelSums;
}

Essentially, I am loading for each thread in the block the two elements at the kernel size distance away on each side.

I am aware that it induces a race condition, but I believe it is benign.

The input is a 3D tensor, and the i,j,k element is accessed as follows:-

input[x][y][z] = (y*WIDTH + x)*DEPTH + z

Thanks for reading.