// ---------------------------------------------------------------------------
// GPU-side timing: bracket the device work with CUDA events and measure
// elapsed time, while counting how long the CPU busy-waits for the GPU.
// ---------------------------------------------------------------------------
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));

// Drain all prior work in the CUDA context so the timing window is clean.
checkCudaErrors(cudaDeviceSynchronize());
float gpu_time = 0.0f;

// Record the start event on the default stream (0).
checkCudaErrors(cudaEventRecord(start, 0));

// Allocate device memory for the input data.
float *d_idata;
checkCudaErrors(cudaMalloc((void **) &d_idata, mem_size));
// Copy host input data to the device.
checkCudaErrors(cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice));

// Allocate device memory for the result.
float *d_odata;
checkCudaErrors(cudaMalloc((void **) &d_odata, mem_size));

// Execution configuration: a single block of num_threads threads.
// The third launch parameter (mem_size) is the dynamic shared-memory size
// available to the kernel.
dim3 grid(1, 1, 1);
dim3 threads(num_threads, 1, 1);
testKernel<<< grid, threads, mem_size >>>(d_idata, d_odata);
// Kernel launches return no status directly; check for launch errors here.
getLastCudaError("Kernel execution failed");

// Allocate host memory for the result and copy it back from the device.
// NOTE(review): this cudaMemcpy is blocking, so by the time the stop event
// is recorded the GPU is likely idle and the wait counter below may stay 0;
// confirm whether async copies were intended.
float *h_odata = (float *) malloc(mem_size);
checkCudaErrors(cudaMemcpy(h_odata, d_odata, sizeof(float) * num_threads,
                           cudaMemcpyDeviceToHost));

// Record the stop event, then poll it, counting CPU spin iterations until
// all preceding GPU work has completed.
checkCudaErrors(cudaEventRecord(stop, 0));
unsigned long int counter = 0;
while (cudaEventQuery(stop) == cudaErrorNotReady)
{
    counter++;
}
checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
printf("GPU执行耗时: %.2f (ms)\n", gpu_time);
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);

// ---------------------------------------------------------------------------
// CPU-side timing: time the serial reference computation with the SDK timer.
// ---------------------------------------------------------------------------
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
sdkStartTimer(&timer);

// Compute the reference ("gold") solution on the host for later comparison.
float *reference = (float *) malloc(mem_size);
computeGold(reference, h_idata, num_threads);

sdkStopTimer(&timer);
printf("串行耗时:%f (ms)\n", sdkGetTimerValue(&timer));