Simple Use of MPI

▶ Source code. The root node on the host generates a random array and scatters it across the nodes (this example uses only one node); each node computes square roots on its GPU and sums them; the root node then gathers the per-node results via MPI, reduces them with a sum, and divides by the total array size (i.e., the average of the square roots of all elements in the random array).

// simpleMPI.h
extern "C"
{
    void initData(float *data, int dataSize);
    void computeGPU(float *hostData, int blockSize, int gridSize);
    float sum(float *data, int size);
    void my_abort(int err);
}
// simpleMPI.cu
#include <cstdlib>
#include <iostream>
#include <mpi.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "simpleMPI.h"

using std::cout;
using std::cerr;
using std::endl;

#define CUDA_CHECK(call)                                                    \
    if ((call) != cudaSuccess)                                              \
    {                                                                       \
        cudaError_t err = cudaGetLastError();                               \
        cerr << "CUDA error calling \""#call"\", code is " << err << endl;  \
        my_abort(err);                                                      \
    }

// GPU kernel: take the square root of each element
__global__ void simpleMPIKernel(float *input, float *output)
{
    // The grid is sized so that tid always indexes a valid element
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    output[tid] = sqrt(input[tid]);
}

// Fill the array with random values in [0, 1]
void initData(float *data, int dataSize)
{
    for (int i = 0; i < dataSize; i++)
        data[i] = (float)rand() / RAND_MAX;
}

// Run the square-root computation on the GPU
void computeGPU(float *hostData, int blockSize, int gridSize)
{
    int dataSize = blockSize * gridSize;

    float *deviceInputData = NULL;
    CUDA_CHECK(cudaMalloc((void **)&deviceInputData, dataSize * sizeof(float)));
    float *deviceOutputData = NULL;
    CUDA_CHECK(cudaMalloc((void **)&deviceOutputData, dataSize * sizeof(float)));

    CUDA_CHECK(cudaMemcpy(deviceInputData, hostData, dataSize * sizeof(float), cudaMemcpyHostToDevice));
    simpleMPIKernel<<<gridSize, blockSize>>>(deviceInputData, deviceOutputData);
    CUDA_CHECK(cudaMemcpy(hostData, deviceOutputData, dataSize * sizeof(float), cudaMemcpyDeviceToHost));

    CUDA_CHECK(cudaFree(deviceInputData));
    CUDA_CHECK(cudaFree(deviceOutputData));
}

// Simple summation over a host array
float sum(float *data, int size)
{
    float accum = 0.f;
    for (int i = 0; i < size; i++)
        accum += data[i];
    return accum;
}

// Abort helper
void my_abort(int err)
{
    cout << "Test FAILED\n";
    MPI_Abort(MPI_COMM_WORLD, err);
}
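The sample simply uses whatever GPU is current on each node. When several MPI ranks share one machine, each rank should select its own device before its first CUDA call. A minimal sketch of that, not part of the original sample (the helper name and the round-robin mapping are assumptions):

// bindRankToDevice: hypothetical helper, not in the sample — map each MPI
// rank to a GPU before any other CUDA call.
#include <mpi.h>
#include "cuda_runtime.h"

void bindRankToDevice()
{
    int commRank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &commRank);

    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);

    // Round-robin mapping assumes ranks on the same host are numbered
    // consecutively; robust code would derive a host-local rank from an
    // MPI_COMM_TYPE_SHARED sub-communicator instead.
    if (deviceCount > 0)
        cudaSetDevice(commRank % deviceCount);
}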
// simpleMPI.cpp
#include <cstdio>
#include <iostream>
#include <mpi.h>
#include "simpleMPI.h"

using std::cout;
using std::cerr;
using std::endl;

#define MPI_CHECK(call)                              \
    if ((call) != MPI_SUCCESS)                       \
    {                                                \
        cerr << "MPI error calling \""#call"\"\n";   \
        my_abort(-1);                                \
    }

int main(int argc, char *argv[])
{
    int blockSize = 256;
    int gridSize = 10000;
    int dataSizePerNode = gridSize * blockSize;

    // Initialize MPI
    MPI_CHECK(MPI_Init(&argc, &argv));

    // Get the communicator size and this node's rank
    int commSize, commRank;
    MPI_CHECK(MPI_Comm_size(MPI_COMM_WORLD, &commSize));
    MPI_CHECK(MPI_Comm_rank(MPI_COMM_WORLD, &commRank));

    // The root node generates the random array
    int dataSizeTotal = dataSizePerNode * commSize;
    float *dataRoot = NULL;
    if (commRank == 0)
    {
        cout << "Running on " << commSize << " nodes" << endl;
        dataRoot = new float[dataSizeTotal];
        initData(dataRoot, dataSizeTotal);
    }

    // Each node allocates a buffer to receive its share of the data
    float *dataNode = new float[dataSizePerNode];
    MPI_CHECK(MPI_Scatter(dataRoot, dataSizePerNode, MPI_FLOAT, dataNode, dataSizePerNode, MPI_FLOAT, 0, MPI_COMM_WORLD));

    // Free the root node's copy
    if (commRank == 0)
        delete [] dataRoot;

    // Each node computes square roots on its GPU, then reduces its share to one value
    computeGPU(dataNode, blockSize, gridSize);
    float sumNode = sum(dataNode, dataSizePerNode);

    // Collect each node's partial sum on the root via an MPI reduction
    float sumRoot;
    MPI_CHECK(MPI_Reduce(&sumNode, &sumRoot, 1, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD));

    // Clean up and print the result
    delete[] dataNode;
    MPI_CHECK(MPI_Finalize());

    if (commRank == 0)
    {
        float average = sumRoot / dataSizeTotal;
        cout << "Average of square roots is: " << average << endl;
        cout << "PASSED\n";
    }

    getchar();
    return 0;
}
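Because simpleMPI.cu is compiled by nvcc while simpleMPI.cpp goes through the MPI compiler wrapper, the two translation units are built separately and linked at the end; the extern "C" declarations in simpleMPI.h keep the symbol names consistent across the two compilations. One plausible build-and-run sequence (the object names, library path, and process count are assumptions; adjust to your MPI and CUDA installation):

nvcc -c simpleMPI.cu -o simpleMPI_gpu.o
mpicxx -c simpleMPI.cpp -o simpleMPI_main.o
mpicxx simpleMPI_main.o simpleMPI_gpu.o -o simpleMPI -L/usr/local/cuda/lib64 -lcudart
mpirun -np 1 ./simpleMPI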

▶ Output

Running on 1 nodes
Average of square roots is: 0.667507
PASSED
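
As a sanity check on that number: the array elements are uniform on [0, 1], and ∫₀¹ √x dx = 2/3 ≈ 0.6667, so an average of 0.667507 over a large random array is exactly what one should expect.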

▶ Takeaways

● The new ground here is MPI's collective communication functions (MPI_Scatter / MPI_Reduce); the CUDA side adds nothing new.
