看了好多遍,一直感觉其中temp在while循环中结束后是只得到一个值的,怎么还可能有cache[cacheIndex] = temp;这一步呢?这里要理解了下面也就通了。我不知道我理解错了哪个地方,很迫切想知道。可能问题太简单,但我找了网上的解答,没有回答这个问题的。先谢谢了!
// imin(a,b): minimum of two values. Arguments and the expansion are fully
// parenthesized so the macro stays correct inside larger expressions and
// with compound arguments (the original `a<b?a:b` was not hygienic).
#define imin(a,b) ((a)<(b)?(a):(b))

// Problem size: 1M elements.
const int N = 1024 * 1024;
// Threads per block; must be a power of two for the tree reduction in dot().
const int threadsPerBlock = 256;
// Enough blocks to cover N (ceiling division), capped at 512; the kernel's
// grid-stride loop covers whatever the grid does not reach in one pass.
const int blocksPerGrid = imin( 512, (N+threadsPerBlock-1) / threadsPerBlock );
// Kernel: per-block partial dot product of a and b.
// Launch layout: 1-D grid of 1-D blocks. threadsPerBlock must be a power of
// two (required by the tree reduction below). c receives one float per
// block; the host sums those partial results afterwards.
__global__ void dot( float *a, float *b, float *c ){
    // Per-block shared-memory scratchpad, one slot per thread.
    __shared__ float cache[threadsPerBlock];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;

    // Grid-stride loop: `temp` is this thread's PRIVATE running sum. Each
    // pass adds one product, then jumps ahead by the total thread count
    // (blockDim.x * gridDim.x). With N = 1M and 512*256 = 131072 threads,
    // each thread accumulates 8 products before the loop exits — so temp
    // ends up holding the sum of all elements this thread was responsible
    // for, and that single per-thread sum is what goes into the cache.
    float temp = 0;
    while (tid < N) {
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }

    // Publish this thread's partial sum into shared memory.
    cache[cacheIndex] = temp;

    // Barrier: every slot of cache must be written before any thread reads
    // a neighbor's slot in the reduction.
    __syncthreads();

    // Tree reduction in shared memory: each pass folds the upper half of
    // the active range onto the lower half. Requires blockDim.x to be a
    // power of 2 so the range halves evenly.
    int i = blockDim.x/2;
    while (i != 0) {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        // Barrier sits OUTSIDE the if, so all threads of the block reach it.
        __syncthreads();
        i /= 2;
    }

    // Thread 0 holds the block's total; write it to this block's output slot.
    if (cacheIndex == 0)
        c[blockIdx.x] = cache[0];
}
// Host driver: fills a[i] = i and b[i] = 2i, launches dot() to get one
// partial sum per block, finishes the cross-block sum on the CPU, and
// compares against the closed-form 2 * sum of squares.
int main( void ) {
    float *a, *b, c, *partial_c;
    float *dev_a, *dev_b, *dev_partial_c;

    // allocate memory on the CPU side
    a = (float*)malloc( N * sizeof(float) );
    b = (float*)malloc( N * sizeof(float) );
    partial_c = (float*)malloc( blocksPerGrid * sizeof(float) );

    // allocate the memory on the GPU
    cudaMalloc( (void**)&dev_a, N * sizeof(float) );
    cudaMalloc( (void**)&dev_b, N * sizeof(float) );
    cudaMalloc( (void**)&dev_partial_c, blocksPerGrid * sizeof(float) );

    // fill in the host memory with data: dot(a,b) = sum(i * 2i) = 2*sum(i^2)
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i * 2;
    }

    // copy the arrays 'a' and 'b' to the GPU
    cudaMemcpy( dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice );

    dot<<<blocksPerGrid, threadsPerBlock>>>( dev_a, dev_b, dev_partial_c );

    // copy the per-block partial sums back from the GPU to the CPU.
    // cudaMemcpy on the default stream blocks until the kernel has finished.
    cudaMemcpy( partial_c, dev_partial_c, blocksPerGrid * sizeof(float),
                cudaMemcpyDeviceToHost );

    // finish up on the CPU side: sum the blocksPerGrid partial results
    c = 0;
    for (int i = 0; i < blocksPerGrid; i++) {
        c += partial_c[i];
    }

    // Closed form: sum_{i=0}^{x} i^2 = x(x+1)(2x+1)/6 (fully parenthesized).
    // Note: float has ~7 significant digits, so only approximate agreement
    // is expected at this N.
    #define sum_squares(x) ((x)*((x)+1)*(2*(x)+1)/6)
    printf( "Does GPU value %.6g = %.6g?\n",
            c, 2 * sum_squares( (float)(N - 1) ) );

    // free memory on the GPU side
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_partial_c );

    // free memory on the CPU side
    free( a );
    free( b );
    free( partial_c );

    getchar();  // keep the console window open until a key is pressed
    return 0;
}