I can't get MPI to send to an arbitrary destination. If I hardcode the destination it works fine, but if I try to pick a random one it doesn't. Here is the relevant code:

    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD,&myid);
    srand48(myid);
    request=MPI_REQUEST_NULL;
    if(myid == 0){
            buffer=drand48();
            do {
                    destination=lrand48() % numprocs;
            } while (destination == 0); //Prevent sending to self
            MPI_Isend(&buffer,1,MPI_DOUBLE,destination,1234,MPI_COMM_WORLD,&request);

    }
    else if (myid == destination) {
            MPI_Irecv(&buffer,1,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&request);

    }
    if(myid == 0){
            printf("processor %d  sent %lf to %d\n",myid,buffer,destination);
    }
    else {
            printf("processor %d  got %lf\n",myid,buffer);
    }

The program compiles fine with mpicc main.c, and when I run it with mpirun -np 4 ./a.out the output looks like this:
    processor 0  sent 0.170828 to 2
    processor 1  got 0.000000
    processor 2  got 0.000000
    processor 3  got 0.000000

If I instead hardcode the destination as 2, for example, I get the expected output:
    processor 0  sent 0.170828
    processor 1  got 0.000000
    processor 2  got 0.170828
    processor 3  got 0.000000

Best Answer

MPI_Isend and MPI_Irecv only start the corresponding non-blocking operations. They are not guaranteed to have completed until you pass the returned request handle to a function from the MPI_Wait or MPI_Test family (with the test functions, the completion status of the request is returned in a boolean flag, and the request has not completed as long as that flag remains false).
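
For instance, a minimal sketch of the polling variant mentioned above (assuming request is the handle returned by the MPI_Isend/MPI_Irecv in the question) could look like this:

    int flag = 0;   // set to non-zero by MPI_Test once the operation has completed
    while (!flag) {
        // MPI_Test returns immediately; the request is done only when flag is true
        MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
        // other useful work could be overlapped with the communication here
    }
    // The blocking equivalent is simply: MPI_Wait(&request, MPI_STATUS_IGNORE);
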
However, your code also has a conceptual problem. MPI is a distributed-memory paradigm: each MPI rank effectively lives in its own address space (the standard does not strictly require this, but in practice all MPI implementations provide it). Setting destination on rank 0 therefore does not magically propagate its value to the other processes. You can either broadcast the value first, or send a special "empty" message to all the other ranks, for example:

    if (myid == 0) {
       int i;
       MPI_Request reqs[numprocs];

       buffer=drand48();
       do {
          destination=lrand48() % numprocs;
       } while (destination == 0); // Prevent sending to self
       for (i = 1; i < numprocs; i++) {
          if (i == destination)
             MPI_Isend(&buffer,1,MPI_DOUBLE,i,1234,MPI_COMM_WORLD,&reqs[i]);
          else
             // Send an empty message with a different tag
             MPI_Isend(&buffer,0,MPI_DOUBLE,i,4321,MPI_COMM_WORLD,&reqs[i]);
       }
       reqs[0] = MPI_REQUEST_NULL;
       MPI_Waitall(numprocs, reqs, MPI_STATUSES_IGNORE);
       printf("processor %d  sent %lf to %d\n",myid,buffer,destination);
    }
    else {
       MPI_Status status;

       // A receive count of 1 also matches the zero-count "empty" message
       MPI_Recv(&buffer,1,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
       if (status.MPI_TAG == 1234)
          printf("processor %d  got %lf\n",myid,buffer);
    }

With a broadcast, the code would look like this:

    request=MPI_REQUEST_NULL;
    if (myid == 0) {
       buffer=drand48();
       do {
          destination=lrand48() % numprocs;
       } while (destination == 0); // Prevent sending to self
       MPI_Bcast(&destination,1,MPI_INT,0,MPI_COMM_WORLD);
       MPI_Isend(&buffer,1,MPI_DOUBLE,destination,1234,MPI_COMM_WORLD,&request);
    }
    else {
       MPI_Bcast(&destination,1,MPI_INT,0,MPI_COMM_WORLD);
       if (myid == destination) {
          MPI_Irecv(&buffer,1,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&request);
       }
    }
    // Ranks that posted no operation still hold MPI_REQUEST_NULL, so MPI_Wait returns at once
    MPI_Wait(&request, MPI_STATUS_IGNORE);
    if (myid == 0) {
       printf("processor %d  sent %lf to %d\n",myid,buffer,destination);
    }
    else {
       printf("processor %d  got %lf\n",myid,buffer);
    }
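
For reference, here is one way the whole program could be assembled around the broadcast variant. This is only a sketch that reuses the question's variable names and adds the missing boilerplate (headers, declarations, the wait, and MPI_Finalize); it assumes the program is launched with at least two processes:

    #include <stdio.h>
    #include <stdlib.h>
    #include <mpi.h>

    int main(int argc, char *argv[])
    {
        int numprocs, myid, destination = -1;
        double buffer = 0.0;
        MPI_Request request = MPI_REQUEST_NULL;

        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
        MPI_Comm_rank(MPI_COMM_WORLD, &myid);
        srand48(myid);

        if (myid == 0) {
            buffer = drand48();
            do {
                destination = lrand48() % numprocs;
            } while (destination == 0);   // prevent sending to self (needs numprocs > 1)
        }

        // Every rank learns the destination chosen by rank 0
        MPI_Bcast(&destination, 1, MPI_INT, 0, MPI_COMM_WORLD);

        if (myid == 0)
            MPI_Isend(&buffer, 1, MPI_DOUBLE, destination, 1234,
                      MPI_COMM_WORLD, &request);
        else if (myid == destination)
            MPI_Irecv(&buffer, 1, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
                      MPI_COMM_WORLD, &request);

        // Ranks that posted nothing still hold MPI_REQUEST_NULL, so this returns immediately
        MPI_Wait(&request, MPI_STATUS_IGNORE);

        if (myid == 0)
            printf("processor %d  sent %lf to %d\n", myid, buffer, destination);
        else
            printf("processor %d  got %lf\n", myid, buffer);

        MPI_Finalize();
        return 0;
    }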
