nvcc -Xptxas -dlcm=ca makes program slower than cg

Hi:

I tried a small program which is used to test theoretical max FLOPs of my gtx 1080 Ti CUDA device.

It works well & reaches the theoretical max flops ~ 11Tflops.

However, when I compile with nvcc using -Xptxas -dlcm=ca, the flops drop to only 50% (~5.5 Tflops).

Then I compared the two cases, nvcc -Xptxas -dlcm=cg versus -Xptxas -dlcm=ca, and the speed indeed differs by 50%.

My questions are:

1. Why does enabling the L1 cache (-dlcm=ca) degrade performance in this case?

  1. I posted the nvprof diff below. Could someone give a hint about how to interpret this difference? Can you explain anything from it, please?

https://photos.google.com/photo/AF1QipOF8B3zrf2CUJTadNPOAQw0aRqN7aWDXnECaS42

https://photos.google.com/photo/AF1QipNwuWZY0pk1eniCokYc5lFOeXJvp1U3mSXr5aQe

  1. I put the whole experiment result at the link below; please refer to it, thanks~

https://drive.google.com/open?id=1ATrxIRXicrEC4wsERxSZNK1DFzZbk85B

  1. According to the spec, the default compile option is -Xptxas -dlcm=ca; however, if I don't specify -Xptxas -dlcm= at all, the speed is 11 Tflops. Does this imply that the default option is actually cg instead of ca? Is that correct?

  2. Does this imply that if, within my kernel program, all operands can be held in registers (in this case, each SM has 1024 shared floating-point variables, so it needs only 4 KB), I should use cg, which is faster than ca?

Thanks~

/*

 * Copyright 1993-2007 NVIDIA Corporation.  All rights reserved.

 *

 * NOTICE TO USER:

 *

 * This source code is subject to NVIDIA ownership rights under U.S. and

 * international Copyright laws.  Users and possessors of this source code

 * are hereby granted a nonexclusive, royalty-free license to use this code

 * in individual and commercial software.

 *

 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE

 * CODE FOR ANY PURPOSE.  IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR

 * IMPLIED WARRANTY OF ANY KIND.  NVIDIA DISCLAIMS ALL WARRANTIES WITH

 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF

 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.

 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,

 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS

 * OF USE, DATA OR PROFITS,  WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE

 * OR OTHER TORTIOUS ACTION,  ARISING OUT OF OR IN CONNECTION WITH THE USE

 * OR PERFORMANCE OF THIS SOURCE CODE.

 *

 * U.S. Government End Users.   This source code is a "commercial item" as

 * that term is defined at  48 C.F.R. 2.101 (OCT 1995), consisting  of

 * "commercial computer  software"  and "commercial computer software

 * documentation" as such terms are  used in 48 C.F.R. 12.212 (SEPT 1995)

 * and is provided to the U.S. Government only as a commercial end item.

 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through

 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the

 * source code with only those rights set forth herein.

 *

 * Any use of this source code in individual and commercial software must

 * include, in the user documentation and internal comments to the code,

 * the above Disclaimer and U.S. Government End Users Notice.

 */

/*

    This sample is intended to measure the peak computation rate of the GPU in GFLOPs

    (giga floating point operations per second).

It executes a large number of multiply-add operations, writing the results to

    shared memory. The loop is unrolled for maximum performance.

Depending on the compiler and hardware it might not take advantage of all the

    computational resources of the GPU, so treat the results produced by this code

    with some caution.

*/

#include <stdlib.h>

#include <stdio.h>

#include <string.h>

#include <math.h>

#include <cuda_runtime.h>

//#include <cutil.h>
#include <cuda.h>

// Number of streaming multiprocessors on the target GPU (GTX 1080 Ti: 28 SMs).
#define NUM_SMS (28)

// Per-SM thread counts tried during experimentation.
// NOTE(review): NUM_THREADS_PER_SM is currently unused — NUM_BLOCKS and
// NUM_THREADS_PER_BLOCK below are hard-coded instead.
//#define NUM_THREADS_PER_SM (1024)
#define NUM_THREADS_PER_SM (512)
//#define NUM_THREADS_PER_SM (256)
//#define NUM_THREADS_PER_SM (128)
//#define NUM_THREADS_PER_SM (64)

// Earlier launch-configuration experiments (kept for reference).
//#define NUM_THREADS_PER_BLOCK (128)
//#define NUM_THREADS_PER_BLOCK (128)
//#define NUM_BLOCKS ( (NUM_THREADS_PER_SM * NUM_SMS / NUM_THREADS_PER_BLOCK) )

// Active launch configuration: one block per SM, 1024 threads per block.
#define NUM_BLOCKS ( 28 )
#define NUM_THREADS_PER_BLOCK (1024)

// Trip count of the outer loop inside the gflops() kernel.
#define NUM_ITERATIONS 1024

// 128 MAD instructions
//
// FMAD128(a, b) expands to a serial chain of 128 dependent multiply-add
// statements. Each "x = y * x + y" is normally contracted by nvcc (default
// -fmad=true) into a single FMA instruction, i.e. 2 flops, so one expansion
// accounts for 128 * 2 = 256 flops per thread. The chain is intentionally
// dependent (each statement consumes the previous result) so the compiler
// cannot reorder or eliminate any of it; latency is hidden by running many
// threads, not by per-thread ILP. No comments can appear inside the macro
// body: a // comment would swallow the line-continuation backslash.

#define FMAD128(a, b) \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a; \
      a = b * a + b; \
      b = a * b + a;

// Peak-FLOPs benchmark kernel.
//
// Each thread runs a long, fully unrolled chain of dependent multiply-adds
// held entirely in registers: 16 FMAD128 expansions per loop iteration =
// 16 * 128 MADs = 4096 flops per iteration per thread.
//
// Launch layout: 1-D launch; assumes blockDim.x <= NUM_THREADS_PER_BLOCK
// (the shared array is indexed by threadIdx.x).
// The initial load from the (uninitialized) shared array and the final store
// exist only so the compiler cannot eliminate the arithmetic as dead code;
// the numeric result is meaningless.
__global__ void gflops()
{
    // Declared at function scope: __shared__ variables belong inside device
    // functions (the file-scope declaration here was non-standard; the
    // original SDK sample also declares it inside the kernel).
    __shared__ float result[NUM_THREADS_PER_BLOCK];

    float a = result[threadIdx.x];  // this ensures the mads don't get compiled out
    float b = 1.01f;

    for (int i = 0; i < NUM_ITERATIONS; i++)
    {
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
    }

    // Store the end of the dependency chain so none of it is optimized away.
    result[threadIdx.x] = a + b;
}

// Abort with a readable message if a CUDA runtime call failed.
static void checkCuda(cudaError_t err, const char* what)
{
    if (err != cudaSuccess)
    {
        fprintf(stderr, "CUDA error in %s: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Launches the gflops() kernel once as a warmup, then times a second launch
// with CUDA events and prints the elapsed time and achieved Gflops.
int main(int argc, char** argv)
{
    // Warmup launch so clock ramp-up and one-time driver costs do not
    // pollute the timed run.
    gflops<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>();
    checkCuda(cudaGetLastError(), "warmup launch");
    // cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    checkCuda(cudaDeviceSynchronize(), "warmup sync");

    // Time one kernel execution with CUDA events. Both events and the launch
    // go to the same (default) stream, so they serialize correctly with the
    // kernel; no host-side sync is needed between the launch and the stop
    // record (the original code's cudaDeviceSynchronize() there added host
    // sync latency into the measurement).
    cudaEvent_t start, stop;
    checkCuda(cudaEventCreate(&start), "create start event");
    checkCuda(cudaEventCreate(&stop), "create stop event");
    checkCuda(cudaEventRecord(start), "record start");
    gflops<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>();
    checkCuda(cudaGetLastError(), "kernel launch");
    checkCuda(cudaEventRecord(stop), "record stop");
    checkCuda(cudaEventSynchronize(stop), "wait for stop");

    float milliseconds = 0.0f;
    checkCuda(cudaEventElapsedTime(&milliseconds, start, stop), "elapsed time");
    printf("milliseconds: %f\n", milliseconds);

    // Total flops: 128 MADs per FMAD128 * 2 flops per MAD * 16 FMAD128s per
    // loop iteration * NUM_ITERATIONS * total thread count.
    // Use double: the product (~1.2e11) exceeds float's 24-bit exact-integer
    // range, so a float accumulation silently loses precision.
    double flops = 128.0 * 2.0 * 16.0 * NUM_ITERATIONS
                 * (double)NUM_BLOCKS * NUM_THREADS_PER_BLOCK;
    printf("flops: %f \n", flops);
    // ms -> Gflops: flops / (ms * 1e6)  ==  (flops * 1000 / ms) / 1e9.
    printf("Gflops: %f\n", flops / (milliseconds * 1.0e6));

    checkCuda(cudaEventDestroy(start), "destroy start event");
    checkCuda(cudaEventDestroy(stop), "destroy stop event");
    return 0;
}