Why does a kernel with __syncthreads() and extra conditional checks run faster than one without on an NVIDIA Tesla K20m?

While measuring the runtime of two kernels, I fail to understand why the kernel I expect to be slower (because of block-level synchronization and two extra conditional checks) is actually faster. I’ve only observed this behaviour on an NVIDIA Tesla K20m; the runtime behaves as expected on a Titan XP. Nevertheless, I’m curious why this is so.

Compile and run each version with an argument that specifies the number of iterations the kernel should perform. In my case, I noticed the difference with 100,000 iterations: Version 1 completes in approximately 453 seconds, while Version 2 completes in approximately 422 seconds.

I’m using CUDA 9.0 and compiling with nvcc filename.cu, no additional flags.
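
For reference, the kernel alone could also be timed with CUDA events; the snippet below is only an illustrative sketch of that (it is not part of either version, and it assumes the same d_res and n as in the listings that follow):

// Illustrative timing sketch only: wraps the same launch as in Version 1
// with CUDA events instead of timing the whole process.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start);
kernel<<<2097152, 1024>>>(d_res, n);   // same launch configuration as below
cudaEventRecord(stop);

cudaEventSynchronize(stop);            // wait for the kernel to finish

float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);
fprintf(stdout, "Kernel time: %.3f ms\n", ms);

cudaEventDestroy(start);
cudaEventDestroy(stop);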

Version 1:

#include <stdio.h>
#include <stdlib.h>

__global__
void kernel(unsigned long *res, unsigned long n)
{
  int x = 0;

  for(int i = 0; i < n; i++)
  {
    x++;
  }

  *res = x;
}

int main(int argc, char *argv[])
{
  if(argc != 2)
  {
    fprintf(stderr, "Invalid arguments.\nUsage: %s <iterations>\n", argv[0]);
    exit(EXIT_FAILURE);
  }

  unsigned long n = strtol(argv[1], NULL, 10);

  unsigned long h_res;
  unsigned long *d_res;

  cudaMalloc((void **)&d_res, sizeof(unsigned long));

  kernel<<<2097152, 1024>>>(d_res, n);

  cudaMemcpy(&h_res, d_res, sizeof(unsigned long), cudaMemcpyDeviceToHost);

  fprintf(stdout, "Result(%lu) = %lu\n", n, h_res);

  cudaFree(d_res);

  return EXIT_SUCCESS;
}

Version 2:

#include <stdio.h>
#include <stdlib.h>

__global__
void kernel(volatile unsigned int *t, unsigned long *res, unsigned long n)
{
  __shared__ unsigned int test;

  // thread 0 of each block reads the mapped host flag into shared memory
  if(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
  {
    test = *t;
  }

  __syncthreads();

  // if the flag is set, the whole block exits early (it never is in this test)
  if(test == 1)
  {
    return;
  }

  int x = 0;

  for(int i = 0; i < n; i++)
  {
    x++;
  }

  *res = x;
}

int main(int argc, char *argv[])
{
  if(argc != 2)
  {
    fprintf(stderr, "Invalid arguments.\nUsage: %s <iterations>\n", argv[0]);
    exit(EXIT_FAILURE);
  }

  unsigned long n = strtol(argv[1], NULL, 10);

  unsigned long h_res;
  unsigned long *d_res;
  volatile unsigned int *t;

  cudaMalloc((void **)&d_res, sizeof(unsigned long));
  cudaHostAlloc((void **)&t, sizeof(volatile unsigned int), cudaHostAllocMapped);

  *t = 0;

  kernel<<<2097152, 1024>>>(t, d_res, n);

  cudaMemcpy(&h_res, d_res, sizeof(unsigned long), cudaMemcpyDeviceToHost);

  fprintf(stdout, "Result(%lu) = %lu\n", n, h_res);

  cudaFree((void *)d_res);
  cudaFreeHost((void *)t);

  return EXIT_SUCCESS;
}
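
A note on Version 2's host allocation: I pass the cudaHostAllocMapped pointer t to the kernel directly, which (as far as I understand) works because the device supports unified addressing, as deviceQuery below reports. The more explicit, pre-UVA form of the same setup would roughly be the following sketch (assuming the same d_res and n as in Version 2's main):

// Explicit form of the mapped-pointer setup (illustrative sketch only);
// Version 2 relies on UVA and passes t to the kernel directly instead.
cudaSetDeviceFlags(cudaDeviceMapHost);        // must precede context creation

volatile unsigned int *t;
cudaHostAlloc((void **)&t, sizeof(unsigned int), cudaHostAllocMapped);

unsigned int *d_t;                            // device-side alias of t
cudaHostGetDevicePointer((void **)&d_t, (void *)t, 0);

*t = 0;
kernel<<<2097152, 1024>>>(d_t, d_res, n);     // same launch as in Version 2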

Output of Device Query:

CUDA Device Query (Runtime API) version (CUDART static linking)

Detected 1 CUDA Capable device(s)

Device 0: "Tesla K20m"
  CUDA Driver Version / Runtime Version          9.0 / 9.0
  CUDA Capability Major/Minor version number:    3.5
  Total amount of global memory:                 4743 MBytes (4972937216 bytes)
  (13) Multiprocessors, (192) CUDA Cores/MP:     2496 CUDA Cores
  GPU Max Clock rate:                            706 MHz (0.71 GHz)
  Memory Clock rate:                             2600 Mhz
  Memory Bus Width:                              320-bit
  L2 Cache Size:                                 1310720 bytes
  Maximum Texture Dimension Size (x,y,z)         1D=(65536), 2D=(65536, 65536), 3D=(4096, 4096, 4096)
  Maximum Layered 1D Texture Size, (num) layers  1D=(16384), 2048 layers
  Maximum Layered 2D Texture Size, (num) layers  2D=(16384, 16384), 2048 layers
  Total amount of constant memory:               65536 bytes
  Total amount of shared memory per block:       49152 bytes
  Total number of registers available per block: 65536
  Warp size:                                     32
  Maximum number of threads per multiprocessor:  2048
  Maximum number of threads per block:           1024
  Max dimension size of a thread block (x,y,z): (1024, 1024, 64)
  Max dimension size of a grid size    (x,y,z): (2147483647, 65535, 65535)
  Maximum memory pitch:                          2147483647 bytes
  Texture alignment:                             512 bytes
  Concurrent copy and kernel execution:          Yes with 2 copy engine(s)
  Run time limit on kernels:                     No
  Integrated GPU sharing Host Memory:            No
  Support host page-locked memory mapping:       Yes
  Alignment requirement for Surfaces:            Yes
  Device has ECC support:                        Enabled
  Device supports Unified Addressing (UVA):      Yes
  Device PCI Domain ID / Bus ID / location ID:   0 / 4 / 0
  Compute Mode:
     < Default (multiple host threads can use ::cudaSetDevice() with device simultaneously) >

deviceQuery, CUDA Driver = CUDART, CUDA Driver Version = 9.0, CUDA Runtime Version = 9.0, NumDevs = 1, Device0 = Tesla K20m
Result = PASS