I am able to use ncu to get the Launch Metrics, Source Metrics, and Instructions Per Opcode Metrics (found here). However, I am unable to get the same values with CUPTI after modifying the METRIC_NAME in the sample code /usr/local/cuda-11.8/extras/CUPTI/samples/callback_profiling/callback_profiling.cu. I get the error:

FAILED: NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest(&convertMetricToEvalRequest) with error NVPA_STATUS_INVALID_ARGUMENT
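For context, the only change I make to the sample is the metric name it requests. A minimal sketch of the edit (whether this is a #define or a string variable may differ between CUDA versions, so treat the exact form as approximate):

// callback_profiling.cu (sketch): the only edit is the metric name; everything else is left as shipped
#define METRIC_NAME "smsp__branch_targets_threads_divergent.sum"   // was the sample's default metric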
I have tried the names smsp__branch_targets_threads_divergent, smsp__branch_targets_threads_divergent.sum, and device__attribute_smsp__branch_targets_threads_divergent, but none of them worked.
Can someone let me know how I can get these values using CUPTI? Is there an NVIDIA CUPTI sample for this?
A small kernel to profile:
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define N (2048 * 8)
#define THREADS_PER_BLOCK 512

void __device__ add1_device(const int x, const int y, int *z)
{
    *z = x * y;
}

__global__ void dot(int *a, int *b, int *c)
{
    __shared__ int temp[THREADS_PER_BLOCK];
    int index = threadIdx.x + blockIdx.x * blockDim.x;

    // temp[threadIdx.x] = a[index] * b[index];
    add1_device(a[index], b[index], &temp[threadIdx.x]); // comment this line and uncomment the previous one to avoid the __device__ call
    __syncthreads();

    if (threadIdx.x == 0)
    {
        int sum = 0;
        for (int i = 0; i < THREADS_PER_BLOCK; i++)
        {
            sum += temp[i];
        }
        atomicAdd(c, sum);
    }
}
int main()
{
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;
    int size = N * sizeof(int);

    // allocate space for the variables on the device
    cudaMalloc((void **)&dev_a, size);
    cudaMalloc((void **)&dev_b, size);
    cudaMalloc((void **)&dev_c, sizeof(int));

    // allocate space for the variables on the host
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(sizeof(int));

    // this is our ground truth
    int prodTest = 0;

    // generate numbers and accumulate the host-side reference result
    for (int i = 0; i < N; i++)
    {
        a[i] = rand() % 10;
        b[i] = rand() % 10;
        prodTest += a[i] * b[i];
    }
    *c = 0;

    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, c, sizeof(int), cudaMemcpyHostToDevice);

    dot<<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_a, dev_b, dev_c);
    cudaDeviceSynchronize();

    cudaMemcpy(c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
    printf("device result = %d, host reference = %d\n", *c, prodTest);

    free(a);
    free(b);
    free(c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
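For completeness, this is roughly how I build the kernel and collect the same metric with ncu, which works fine (the file name dot.cu is just what I call it locally):

nvcc -o dot dot.cu
ncu --metrics smsp__branch_targets_threads_divergent.sum ./dot

It is only the CUPTI/NVPW path that rejects the metric name.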