You’ll need to unroll the loop to get close to peak performance. The following code gets about 315 GFlops on my machine:

```
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
This sample is intended to measure the peak computation rate of the GPU in GFLOPs
(giga floating point operations per second).
It executes a large number of multiply-add operations, writing the results to
shared memory. The loop is unrolled for maximum performance.
Depending on the compiler and hardware it might not take advantage of all the
computational resources of the GPU, so treat the results produced by this code
with some caution.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil.h>
// Launch configuration. Hard-coded for a GPU with 16 SMs (e.g. a G80-class
// part, consistent with the 2007 copyright above) — NOTE(review): adjust
// NUM_SMS for other devices; TODO confirm via cudaGetDeviceProperties.
#define NUM_SMS (16)
// Resident threads per SM: two 192-thread blocks per SM.
#define NUM_THREADS_PER_SM (384)
#define NUM_THREADS_PER_BLOCK (192)
// Enough blocks to place NUM_THREADS_PER_SM threads on each of the NUM_SMS SMs.
#define NUM_BLOCKS ((NUM_THREADS_PER_SM / NUM_THREADS_PER_BLOCK) * NUM_SMS)
// Trip count of the kernel's outer loop; scales total work (and runtime) linearly.
#define NUM_ITERATIONS 10
// 128 dependent multiply-add (MAD) instructions, 2 FLOPs each.
//
// BUG FIX: the original single flat macro ended its last line with a
// trailing '\', so line splicing pulled the following file-scope
// "__shared__ float result[...]" declaration into the macro body. Every
// FMAD128(a, b); invocation then redeclared 'result' inside the kernel
// (a redefinition error) and the intended file-scope array vanished.
// The last line of each macro below deliberately has NO continuation.
//
// The macros are layered (8 -> 32 -> 128) purely so the expansion is easy
// to audit; the result is the exact same alternating statement sequence
//   a = b * a + b;  b = a * b + a;
// repeated 64 times, as in the original longhand version.

// 8 MADs (4 a/b pairs).
#define FMAD8(a, b) \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a;

// 32 MADs.
#define FMAD32(a, b) FMAD8(a, b) FMAD8(a, b) FMAD8(a, b) FMAD8(a, b)

// 128 MADs.
#define FMAD128(a, b) FMAD32(a, b) FMAD32(a, b) FMAD32(a, b) FMAD32(a, b)
// Per-block scratch array in shared memory. It is read (uninitialized — the
// value is irrelevant) at kernel entry and written at exit purely so the
// compiler cannot prove the MAD chain dead and eliminate it.
__shared__ float result[NUM_THREADS_PER_BLOCK];

// Benchmark kernel: every thread executes a long chain of dependent
// multiply-adds. Launch as gflops<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>().
__global__ void gflops()
{
    // Seeding 'a' from memory keeps the arithmetic from being constant-folded.
    float a = result[threadIdx.x];
    float b = 1.01f;

    for (int iter = 0; iter < NUM_ITERATIONS; ++iter)
    {
        // 16 x 128 = 2048 MADs (4096 FLOPs) per thread per iteration.
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
        FMAD128(a, b);
    }

    // Store the combined result so the computation stays observable.
    result[threadIdx.x] = a + b;
}
// Entry point: warm up the GPU with one untimed launch, then time a second
// launch of the gflops kernel and report the achieved GFLOP/s.
int
main(int argc, char** argv)
{
CUT_DEVICE_INIT();
// Warm-up launch absorbs one-time driver/context initialization cost so it
// is not attributed to the timed run.
gflops<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>();
CUDA_SAFE_CALL( cudaThreadSynchronize() );
// Timed run. The launch is asynchronous, so cudaThreadSynchronize() is
// required before stopping the timer.
unsigned int timer = 0;
CUT_SAFE_CALL( cutCreateTimer( &timer));
CUT_SAFE_CALL( cutStartTimer( timer));
gflops<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>();
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL( cutStopTimer( timer));
float time = cutGetTimerValue( timer);  // elapsed time in milliseconds
// output results
printf( "Time: %f (ms)\n", time);
// Total FLOPs = 128 MADs/macro * 2 FLOPs/MAD * 16 macros/iteration
//             * NUM_ITERATIONS * total thread count.
// Accumulated in double (not 32-bit int, which overflows at ~2.1e9) so the
// count stays correct if iterations or launch dimensions are increased.
const double flops = 128.0 * 2.0 * 16.0 * NUM_ITERATIONS * NUM_BLOCKS * NUM_THREADS_PER_BLOCK;
printf("Gflops: %f\n", (flops / (time / 1000.0f)) / 1e9 );
CUT_SAFE_CALL( cutDeleteTimer( timer));
CUT_EXIT(argc, argv);
return 0;
}
```