I don't think we can jump to that conclusion with certainty just based on the data you have shown. I don't have a Jetson Orin device to work on (if you want to discuss this with the Jetson Orin crowd, there is a separate/specific forum for that), but I modified your code to run on a GTX 1660 Super (22 SMs, cc7.5) device. Here is an example with some profiler output:
$ cat t39.cu
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <stdio.h>
#include <time.h>
#include <chrono>
#include <iostream>
void cudaCheck(cudaError_t ret, std::ostream& err = std::cerr) {
    if (ret != cudaSuccess) {
        printf("Cuda failure: %s", cudaGetErrorString(ret));
        abort();
    }
}

void sumArrays(float* a, float* b, float* res, const int size) {
    for (int i = 0; i < size; i += 4) {
        res[i] = a[i] + b[i];
        res[i + 1] = a[i + 1] + b[i + 1];
        res[i + 2] = a[i + 2] + b[i + 2];
        res[i + 3] = a[i + 3] + b[i + 3];
    }
}

void initialData(float* ip, int size) {
    time_t t;
    srand((unsigned)time(&t));
    for (int i = 0; i < size; i++) {
        ip[i] = (float)(rand() & 0xffff) / 1000.0f;
    }
}

void checkResult(float* hostRef, float* gpuRef, const int N) {
    double epsilon = 1.0E-8;
    for (int i = 0; i < N; i++) {
        if (fabs(hostRef[i] - gpuRef[i]) > epsilon) {
            printf("Results don't match!\n");
            printf("%f (hostRef[%d]) != %f (gpuRef[%d])\n", hostRef[i], i, gpuRef[i], i);
            return;
        }
    }
    printf("Check result success!\n");
}

__global__ void sumArraysGPU(const float* a, const float* b, float* c, int32_t n) {
    int32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < n) {
        for (int i = 0; i < 100000; i++) {
            c[idx] = a[idx] + b[idx];
        }
    }
}

__global__ void sumArraysGPU_1(const float* a, const float* b, float* c, int32_t n) {
    int32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < n) {
        for (int i = 0; i < 100000; i++) {
            c[idx] = a[idx] + b[idx];
        }
    }
}
int main(int argc, char** argv) {
    int dev = 0;
    cudaSetDevice(dev);
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    cudaStream_t stream_1;
    cudaStreamCreate(&stream_1);
    cudaEvent_t startEvent, stopEvent;
    cudaCheck(cudaEventCreate(&startEvent));
    cudaCheck(cudaEventCreate(&stopEvent));
    cudaEvent_t startEvent_1, stopEvent_1, startEvent_2, stopEvent_2;
    cudaEventCreate(&startEvent_1);
    cudaEventCreate(&stopEvent_1);
    cudaEventCreate(&startEvent_2);
    cudaEventCreate(&stopEvent_2);
    const int nsms = 22;
    int nElem = nsms * 512;
    int nByte = sizeof(float) * nElem;
    float* a_h = (float*)malloc(nByte);
    float* b_h = (float*)malloc(nByte);
    float* res_h = (float*)malloc(nByte);
    float* res_from_gpu_h = (float*)malloc(nByte);
    memset(res_h, 0, nByte);
    memset(res_from_gpu_h, 0, nByte);
    float *a_d, *b_d, *res_d;
    cudaMalloc((float**)&a_d, nByte);
    cudaMalloc((float**)&b_d, nByte);
    cudaMalloc((float**)&res_d, nByte);
    initialData(a_h, nElem);
    initialData(b_h, nElem);
    cudaMemcpy(a_d, a_h, nByte, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, nByte, cudaMemcpyHostToDevice);
    float* a_h_1 = (float*)malloc(nByte);
    float* b_h_1 = (float*)malloc(nByte);
    float* res_h_1 = (float*)malloc(nByte);
    float* res_from_gpu_h_1 = (float*)malloc(nByte);
    memset(res_h_1, 0, nByte);
    memset(res_from_gpu_h_1, 0, nByte);
    float *a_d_1, *b_d_1, *res_d_1;
    cudaMalloc((float**)&a_d_1, nByte);
    cudaMalloc((float**)&b_d_1, nByte);
    cudaMalloc((float**)&res_d_1, nByte);
    initialData(a_h_1, nElem);
    initialData(b_h_1, nElem);
    cudaMemcpy(a_d_1, a_h_1, nByte, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d_1, b_h_1, nByte, cudaMemcpyHostToDevice);
    // int test_cases[] = {128, 192, 256, 384, 512};
    int test_cases[] = {512};
    for (int i = 0; i < sizeof(test_cases) / sizeof(test_cases[0]); i++) {
        dim3 block(test_cases[i]);
        nElem = block.x * nsms;
        dim3 grid((nElem + block.x - 1) / block.x);
        printf("Execution configuration<<<%d,%d>>>\n", grid.x, block.x);
        printf("####### Launch single kernel #######\n");
        cudaCheck(cudaEventRecord(startEvent, stream));
        sumArraysGPU<<<grid, block, 0, stream>>>(a_d, b_d, res_d, nElem);
        cudaCheck(cudaEventRecord(stopEvent, stream));
        cudaCheck(cudaEventSynchronize(stopEvent));
        float elapsedTime;
        cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent);
        printf("Single kernel execution time: %f ms\n", elapsedTime);
        printf("####### Launch two kernel #######\n");
        auto total_start = std::chrono::high_resolution_clock::now(); // start host-side wall-clock timer
        cudaCheck(cudaEventRecord(startEvent, stream));
        sumArraysGPU<<<grid, block, 0, stream>>>(a_d, b_d, res_d, nElem);
        cudaCheck(cudaEventRecord(stopEvent, stream));
        cudaCheck(cudaEventRecord(startEvent_1, stream_1));
        sumArraysGPU_1<<<grid, block, 0, stream_1>>>(a_d_1, b_d_1, res_d_1, nElem);
        cudaCheck(cudaEventRecord(stopEvent_1, stream_1));
        cudaCheck(cudaEventSynchronize(stopEvent));
        cudaCheck(cudaEventSynchronize(stopEvent_1));
        cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent_1);
        auto total_stop = std::chrono::high_resolution_clock::now(); // stop host-side wall-clock timer
        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(total_stop - total_start);
        std::cout << "Two kernel execution time: " << duration.count() << "," << elapsedTime << " ms\n"; // wall-clock time, event-measured time
    }
    // cudaMemcpy(res_from_gpu_h, res_d, nByte, cudaMemcpyDeviceToHost);
    // cudaMemcpy(res_from_gpu_h_1, res_d_1, nByte, cudaMemcpyDeviceToHost);
    // sumArrays(a_h, b_h, res_h, nElem);
    // checkResult(res_h, res_from_gpu_h, nElem);
    // sumArrays(a_h_1, b_h_1, res_h_1, nElem);
    // checkResult(res_h_1, res_from_gpu_h_1, nElem);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(res_d);
    cudaFree(a_d_1);
    cudaFree(b_d_1);
    cudaFree(res_d_1);
    cudaStreamDestroy(stream);
    cudaStreamDestroy(stream_1);
    // Destroy the CUDA events
    cudaEventDestroy(startEvent);
    cudaEventDestroy(stopEvent);
    free(a_h);
    free(b_h);
    free(res_h);
    free(res_from_gpu_h);
    free(a_h_1);
    free(b_h_1);
    free(res_h_1);
    free(res_from_gpu_h_1);
    cudaDeviceReset();
    return 0;
}
$ nvcc -o t39 t39.cu -arch=sm_75 -Xptxas=-v
ptxas info : 0 bytes gmem
ptxas info : Compiling entry function '_Z14sumArraysGPU_1PKfS0_Pfi' for 'sm_75'
ptxas info : Function properties for _Z14sumArraysGPU_1PKfS0_Pfi
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas info : Used 18 registers, 380 bytes cmem[0]
ptxas info : Compiling entry function '_Z12sumArraysGPUPKfS0_Pfi' for 'sm_75'
ptxas info : Function properties for _Z12sumArraysGPUPKfS0_Pfi
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas info : Used 18 registers, 380 bytes cmem[0]
$ ./t39
Execution configuration<<<22,512>>>
####### Launch single kernel #######
Single kernel execution time: 8.440832 ms
####### Launch two kernel #######
Two kernel execution time: 16,16.7485 ms
$ nsys nvprof --print-gpu-trace ./t39
WARNING: t39 and any of its children processes will be profiled.
Execution configuration<<<22,512>>>
####### Launch single kernel #######
Single kernel execution time: 8.449664 ms
####### Launch two kernel #######
Two kernel execution time: 16,16.7538 ms
Generating '/tmp/nsys-report-271d.qdstrm'
[1/3] [========================100%] report5.nsys-rep
[2/3] [========================100%] report5.sqlite
[3/3] Executing 'cuda_gpu_trace' stats report
Start (ns) Duration (ns) CorrId GrdX GrdY GrdZ BlkX BlkY BlkZ Reg/Trd StcSMem (MB) DymSMem (MB) Bytes (MB) Throughput (MBps) SrcMemKd DstMemKd Device Ctx Strm Name
----------- ------------- ------ ---- ---- ---- ---- ---- ---- ------- ------------ ------------ ---------- ----------------- -------- -------- --------------------------------- --- ---- ----------------------------------------------------------
259,427,846 2,880 131 0.045 15,644.434 Pageable Device NVIDIA GeForce GTX 1660 SUPER (0) 1 7 [CUDA memcpy HtoD]
259,440,166 2,688 132 0.045 16,761.868 Pageable Device NVIDIA GeForce GTX 1660 SUPER (0) 1 7 [CUDA memcpy HtoD]
259,780,935 2,688 136 0.045 16,761.868 Pageable Device NVIDIA GeForce GTX 1660 SUPER (0) 1 7 [CUDA memcpy HtoD]
259,792,583 2,688 137 0.045 16,761.868 Pageable Device NVIDIA GeForce GTX 1660 SUPER (0) 1 7 [CUDA memcpy HtoD]
259,849,575 8,418,292 139 22 1 1 512 1 1 18 0.000 0.000 NVIDIA GeForce GTX 1660 SUPER (0) 1 13 sumArraysGPU(const float *, const float *, float *, int)
268,298,555 10,648,441 144 22 1 1 512 1 1 18 0.000 0.000 NVIDIA GeForce GTX 1660 SUPER (0) 1 13 sumArraysGPU(const float *, const float *, float *, int)
268,305,755 16,740,392 147 22 1 1 512 1 1 18 0.000 0.000 NVIDIA GeForce GTX 1660 SUPER (0) 1 14 sumArraysGPU_1(const float *, const float *, float *, int)
Generated:
/home/bob/bobc/misc/report5.nsys-rep
/home/bob/bobc/misc/report5.sqlite
$
(CUDA 12.1)
Key items:
- the code, when run for the 22 blocks / 512 threads per block test case, shows approximately double the execution time for the two-kernel case, even though two such kernels should be able to run concurrently on this sm_75 device with 22 SMs (a quick way to confirm what the device claims to support is sketched after this list).
- the profiler suggests that in the two-kernel case the kernels may indeed be running concurrently: the start times of the two kernels are within 8 microseconds of each other.
- the second kernel clearly accounts for the overall duration in the two-kernel case. However, I conclude there is overlap, because the first kernel runs in ~8 ms in the one-kernel case but ~10 ms in the two-kernel case. If there were no concurrency, we would not expect the duration of the first launched kernel to be noticeably impacted by the second launch.
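(As an aside, here is a minimal sketch of the device query I have in mind for the first point, using the standard runtime API; device 0 is assumed, as in the test code:)

#include <cuda_runtime.h>
#include <stdio.h>
int main() {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);                              // device 0, as in the test code
    printf("multiProcessorCount: %d\n", prop.multiProcessorCount);  // 22 on my GTX 1660 Super
    printf("concurrentKernels:   %d\n", prop.concurrentKernels);    // 1 = concurrent kernels supported
    return 0;
}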
Yes, I realize this is not a full explanation. I'm simply pointing out that the data presented is not sufficient to conclude that the two kernels are not running in parallel (i.e. concurrently). You are not running into an occupancy limiter; the performance limiter is somewhere else. Bandwidth may be an issue. Note that these codes and footprints are small enough that the caches play an important role, so the limiter may not be device memory bandwidth but rather L1 or L2 cache bandwidth. In my test case, the "footprint" for the 128-threads-per-block case is 128*22*4*3 bytes = ~33 Kbytes. The footprint for the 512-threads-per-block case is therefore 4x that, or ~132 Kbytes. The 16-block case will be somewhat smaller. All of those should fit in the L2, with perhaps some or all fitting in the L1. (My GTX 1660 Super has ~1.5 MB of L2.)
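If you want to double-check the occupancy and footprint arithmetic yourself, something along these lines can be dropped into main() of the test code above (a sketch only; it relies on the sumArraysGPU kernel being visible in the same file):

    // How many 512-thread blocks of sumArraysGPU can be resident per SM?
    // (2 on this device: Turing allows up to 1024 resident threads per SM,
    // and 18 registers per thread is nowhere near the register limit.)
    int maxBlocksPerSM = 0;
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(&maxBlocksPerSM, sumArraysGPU, 512, 0);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    // Footprint of one kernel: 3 arrays (a, b, c) of 512*nsms floats each.
    size_t footprint = 3ull * 512 * prop.multiProcessorCount * sizeof(float);
    printf("max resident blocks per SM at 512 threads: %d\n", maxBlocksPerSM);
    printf("footprint per kernel: %zu bytes, L2 size: %d bytes\n", footprint, prop.l2CacheSize);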
For amusement, if you'd like to see some further compiler optimization, try adding the __restrict__ keyword to each of your kernel pointer parameters.
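For reference, that just means decorating the pointer parameters, e.g.:

__global__ void sumArraysGPU(const float* __restrict__ a,
                             const float* __restrict__ b,
                             float* __restrict__ c,
                             int32_t n) {
    int32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < n) {
        for (int i = 0; i < 100000; i++) {
            c[idx] = a[idx] + b[idx];
        }
    }
}

With that no-aliasing guarantee in place, the compiler can recognize that the repeated load/store in the loop is redundant and optimize it accordingly.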