CUDA, Flops

The hardware is:

# ./deviceQuery

There is 1 device supporting CUDA

Device 0: "GeForce GTX 260"
  Major revision number:                         1
  Minor revision number:                         3
  Total amount of global memory:                 939196416 bytes
  Number of multiprocessors:                     24
  Number of cores:                               192
  Total amount of constant memory:               65536 bytes
  Total amount of shared memory per block:       16384 bytes
  Total number of registers available per block: 16384
  Warp size:                                     32
  Maximum number of threads per block:           512
  Maximum sizes of each dimension of a block:    512 x 512 x 64
  Maximum sizes of each dimension of a grid:     65535 x 65535 x 1
  Maximum memory pitch:                          262144 bytes
  Texture alignment:                             256 bytes
  Clock rate:                                    1.30 GHz
  Concurrent copy and execution:                 Yes

Test PASSED

The card's nominal 'shading power' is ~700 Gflops. Using the program included below — which is not optimised for this particular card, and is reproduced without permission from the NVIDIA forum — the actual measurements are as follows (10 repeats):

Using device 0: GeForce GTX 260
Time: 2.448000 (ms)
Gflops: 493.447512

Press ENTER to exit...

Using device 0: GeForce GTX 260
Time: 2.447000 (ms)
Gflops: 493.649166

Press ENTER to exit...

Using device 0: GeForce GTX 260
Time: 2.448000 (ms)
Gflops: 493.447512

Press ENTER to exit...

Using device 0: GeForce GTX 260
Time: 2.447000 (ms)
Gflops: 493.649166

Press ENTER to exit...

Using device 0: GeForce GTX 260
Time: 2.448000 (ms)
Gflops: 493.447512

Press ENTER to exit...

Using device 0: GeForce GTX 260
Time: 2.447000 (ms)
Gflops: 493.649166

Press ENTER to exit...

Using device 0: GeForce GTX 260
Time: 2.448000 (ms)
Gflops: 493.447512

Press ENTER to exit...

Using device 0: GeForce GTX 260
Time: 2.448000 (ms)
Gflops: 493.447512

Press ENTER to exit...

Using device 0: GeForce GTX 260
Time: 2.448000 (ms)
Gflops: 493.447512

Press ENTER to exit...

Using device 0: GeForce GTX 260
Time: 2.448000 (ms)
Gflops: 493.447512

Press ENTER to exit...

The code used for the example is:

/*
* Copyright 1993-2007 NVIDIA Corporation.  All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.  Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE.  IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND.  NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS,  WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION,  ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users.   This source code is a "commercial item" as
* that term is defined at  48 C.F.R. 2.101 (OCT 1995), consisting  of
* "commercial computer  software"  and "commercial computer software
* documentation" as such terms are  used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
 
/*
   This sample is intended to measure the peak computation rate of the GPU in GFLOPs
   (giga floating point operations per second).
 
   It executes a large number of multiply-add operations, writing the results to
   shared memory. The loop is unrolled for maximum performance.
 
   Depending on the compiler and hardware it might not take advantage of all the
   computational resources of the GPU, so treat the results produced by this code
   with some caution.
*/
 
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
 
#include <cutil.h>
 
#define NUM_SMS (24)
#define NUM_THREADS_PER_SM (384)
#define NUM_THREADS_PER_BLOCK (192)
#define NUM_BLOCKS ((NUM_THREADS_PER_SM / NUM_THREADS_PER_BLOCK) * NUM_SMS)
#define NUM_ITERATIONS 32
 
// 128 MAD instructions
#define FMAD128(a, b) \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
     a = b * a + b; \
     b = a * b + a; \
 
__shared__ float result[NUM_THREADS_PER_BLOCK];
 
__global__ void gflops()
{
   float a = result[threadIdx.x];  // this ensures the mads don't get compiled out
   float b = 1.01f;
 
   for (int i = 0; i < NUM_ITERATIONS; i++)
   {
       FMAD128(a, b);
       FMAD128(a, b);
       FMAD128(a, b);
       FMAD128(a, b);
       FMAD128(a, b);
       FMAD128(a, b);
       FMAD128(a, b);
       FMAD128(a, b);
       FMAD128(a, b);
       FMAD128(a, b);
       FMAD128(a, b);
       FMAD128(a, b);
       FMAD128(a, b);
       FMAD128(a, b);
       FMAD128(a, b);
       FMAD128(a, b);
   }
   result[threadIdx.x] = a + b;
}
 
int
main(int argc, char** argv)
{
   CUT_DEVICE_INIT(argc,argv);
 
   // warmup
   gflops<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>();
   CUDA_SAFE_CALL( cudaThreadSynchronize() );
 
   // execute kernel
   unsigned int timer = 0;
   CUT_SAFE_CALL( cutCreateTimer( &timer));
   CUT_SAFE_CALL( cutStartTimer( timer));
 
   gflops<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>();
 
   CUDA_SAFE_CALL( cudaThreadSynchronize() );
   CUT_SAFE_CALL( cutStopTimer( timer));
   float time = cutGetTimerValue( timer);
 
   // output results
   printf( "Time: %f (ms)\n", time);
   const int flops = 128 * 2 * 16 * NUM_ITERATIONS * NUM_BLOCKS * NUM_THREADS_PER_BLOCK;
   printf("Gflops: %f\n", (flops / (time / 1000.0f)) / 1e9 );
 
   CUT_SAFE_CALL( cutDeleteTimer( timer));
   CUT_EXIT(argc, argv);
}
about/benchmarks/cuda_flops.txt · Last modified: 2009/05/12 11:21 (external edit)