Texture addressing question

I thought I had texture addressing sorted, but now I'm not so sure…
I understood that if I set up a texture as unnormalised with cudaFilterModePoint, then what I read back in the kernel should be exactly what I put in on the CPU side — i.e. no interpolation happening. However, I consistently get slightly different values back (simplified code below). What am I missing here?

// Module-scope texture reference the kernel samples from; bound to the
// image's CUDA array in doKernel().  cudaReadModeElementType means tex2D
// returns the raw unsigned char texel values (no conversion to
// normalized float).
texture<unsigned char, 2, cudaReadModeElementType> bitmapTexture;

// send an image up as a texture
// RGB only, sent interleaved as one unsigned char per channel

// Upload an interleaved RGB image into a CUDA array, bind it to the
// module-scope bitmapTexture, and launch the demo kernel.
//
// bitmap        : host pointer to the pixel data
// bitmap_step   : row pitch of the host image in BYTES
// bitmap_width  : image width in pixels
// bitmap_height : image height in rows
// channelCount  : channels (bytes) per pixel, e.g. 3 for RGB
void doKernel( unsigned char *bitmap,
unsigned long bitmap_step,
unsigned bitmap_width,
unsigned bitmap_height,
unsigned channelCount // 3 for rgb
)

{

cudaArray *cuArray1 = NULL;

cudaChannelFormatDesc channelDescBitmap = cudaCreateChannelDesc<unsigned char>();
cudaMallocArray(&cuArray1, &channelDescBitmap,
                bitmap_width * channelCount, bitmap_height);

// cudaMemcpy2DToArray takes the copy width in BYTES and the height in
// ROWS.  The original code copied only bitmap_width bytes per row (one
// channel's worth of an interleaved RGB row) and scaled the row count by
// sizeof(unsigned char), which is only harmless because that sizeof is 1.
cudaMemcpy2DToArray(
	cuArray1, 0, 0, bitmap,
	bitmap_step,                   // source pitch in bytes
	bitmap_width * channelCount,   // bytes per row to copy
	bitmap_height,                 // number of rows
	cudaMemcpyHostToDevice);

bitmapTexture.addressMode[0] = cudaAddressModeWrap;
bitmapTexture.addressMode[1] = cudaAddressModeWrap;
bitmapTexture.filterMode = cudaFilterModePoint;
// Be explicit: we want integer texel coordinates, not [0,1) normalized
// ones.  Relying on the default here was the cause of the slightly-off
// reads described in the question.
bitmapTexture.normalized = false;

cudaBindTextureToArray(bitmapTexture, cuArray1, channelDescBitmap);

dim3 dimBlock(1);		// just for demo
dim3 dimGrid(1);

odd_kernel<<<dimGrid, dimBlock>>>();

// Wait for the kernel to finish before releasing the array it samples
// from, then unbind and free so repeated calls don't leak device memory.
cudaDeviceSynchronize();
cudaUnbindTexture(bitmapTexture);
cudaFreeArray(cuArray1);

}

// Demo kernel: fetch the three colour components of one pixel from
// bitmapTexture.  With filterMode == cudaFilterModePoint and
// normalized == false, tex2D(tex, x, y) returns the texel at integer
// coordinates (x, y) with no filtering.
// NOTE: the kernel qualifier must be __global__, not `global`.
__global__ void odd_kernel()
{

// Byte (column) and row coordinates of the pixel under test.
int incomingXbytes = 5;
int incomingYbytes = 2;

// R, G and B live in consecutive texels along x because the image was
// uploaded as one interleaved byte array.  (Original symptom: these came
// back very close but not equal to the source data, e.g. 65 vs 64.)

unsigned char componentValue_R = tex2D(bitmapTexture, incomingXbytes,   incomingYbytes);
unsigned char componentValue_G = tex2D(bitmapTexture, incomingXbytes+1, incomingYbytes);
unsigned char componentValue_B = tex2D(bitmapTexture, incomingXbytes+2, incomingYbytes);

}

Solved: I had assumed that the texture reference's `normalized` boolean was false by default — bad assumption. Setting `bitmapTexture.normalized = false;` explicitly before binding fixes it.