关于cutGetTimerValue(timer)的问题

// Two independent timers are used here:
//  - CUDA events (start/stop) measure GPU execution time of the async sequence.
//  - The cutil timer measures CPU wall-clock time spent *issuing* the calls.
cudaEvent_t start, stop;
cutilSafeCall( cudaEventCreate(&start) );
cutilSafeCall( cudaEventCreate(&stop) );

unsigned int timer;
cutilCheckError( cutCreateTimer(&timer) );
cutilCheckError( cutResetTimer(timer) );
// Drain any previously queued GPU work so neither timer includes it.
cutilSafeCall( cudaThreadSynchronize() );
float gpu_time = 0.0f;

// The CPU timer starts counting HERE (cutStartTimer), not at any CUDA call:
// cutGetTimerValue(timer) below reports elapsed time from this statement
// to the matching cutStopTimer below.
  cutilCheckError( cutStartTimer(timer) );

cudaEventRecord(start, 0);   // marks start of GPU work in stream 0
cudaMemcpyAsync(d_a, a, nbytes, cudaMemcpyHostToDevice, 0);
increment_kernel<<<blocks, threads, 0, 0>>>(d_a, value);
cudaMemcpyAsync(a, d_a, nbytes, cudaMemcpyDeviceToHost, 0);
cudaEventRecord(stop, 0);    // marks end of GPU work in stream 0
// The async calls above only ENQUEUE work and return immediately, so the
// CPU timer stopped here captures just the call-issue overhead.
cutilCheckError( cutStopTimer(timer) );

// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter = 0;
while( cudaEventQuery(stop) == cudaErrorNotReady )
{
counter++;
}
cutilSafeCall( cudaEventElapsedTime(&gpu_time, start, stop) );

// print the cpu and gpu times
// FIX: the original used typographic quotes (U+201C/U+201D) in the format
// strings, which do not compile, and printed an unsigned long with %d
// (undefined behavior); use plain ASCII quotes and %lu.
printf("time spent executing by the GPU: %.2f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.2f\n", cutGetTimerValue(timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);

//以上摘自asyncAPI的sdk
//麻烦问下,cutGetTimerValue(timer) 这个计时是从哪个语句里的timer开始计时的呢?
谢谢。。