MPI 文档断言:接收缓冲区 (recvbuf) 的地址仅在根进程 (root) 上有意义。这意味着在其他进程中不必为接收数据分配内存。这一点由 this question 证实。

int MPI_Reduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
               MPI_Op op, int root, MPI_Comm comm)

起初我认为 recvbuf 甚至可以不存在:不必为 recvbuf 指向的数据分配内存(例如通过动态分配)。不幸的是(我花了很多时间才明白自己的错误!),看来即使指针指向的内存无效,指针变量本身也必须存在(即已被定义)。
请看下面的代码,我有一个版本,给出了一个segfault,一个没有。
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv) {
   // MPI initialization
    int world_rank, world_size;
    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    int n1 = 3, n2 = 10; // Sizes of the 2d arrays

    // Per-rank input data: n1 rows of n2 longs each, allocated on every rank.
    long **observables = (long **) malloc(n1 * sizeof(long *));
    for (int k = 0 ; k < n1 ; ++k) {
        observables[k] = (long *) calloc(n2, sizeof(long));
        for (long i = 0 ; i < n2 ; ++i) {
            observables[k][i] = k * i * world_rank; // Whatever
        }
    }

    long **obs_sum; // This will hold the sum on process 0
#ifdef OLD  // Version that gives a segfault
    // Only rank 0 allocates obs_sum here, so on every other rank obs_sum
    // is an uninitialized pointer; evaluating obs_sum[k] in the reduce
    // loop below dereferences that garbage pointer -> segfault.
    if (world_rank == 0) {
        obs_sum = (long **) malloc(n2 * sizeof(long *));
        for (int k = 0 ; k < n2 ; ++k) {
            obs_sum[k] = (long *) calloc(n2, sizeof(long));
        }
    }
#else // Correct version
   // We define all the pointers in all the processes.
    obs_sum = (long **) malloc(n2 * sizeof(long *));
    if (world_rank == 0) {
        for (int k = 0 ; k < n2 ; ++k) {
            obs_sum[k] = (long *) calloc(n2, sizeof(long));
        }
    }
#endif
    // NOTE(review): obs_sum is sized with n2 (=10) row pointers in both
    // branches, but only n1 (=3) rows are ever used below — n1 was
    // probably intended; it only works because n2 > n1 here.

    for (int k = 0 ; k < n1 ; ++k) {
        // This is the line that results in a segfault if OLD is defined
        MPI_Reduce(observables[k], obs_sum[k], n2, MPI_LONG, MPI_SUM, 0,
                   MPI_COMM_WORLD);
    }

    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
    // You may free memory here

    return 0;
}

我解释得对吗?这种行为背后的理由是什么?

最佳答案

问题不在于 MPI,而在于您传递的是 obs_sum[k],但 obs_sum 这个指针本身在非根进程上根本没有被定义/初始化。

// (Excerpt from the question.) Evaluating obs_sum[k] must first read the
// obs_sum pointer itself — which is uninitialized on non-root ranks in
// the OLD version — and dereference it, before MPI is ever entered.
for (int k = 0 ; k < n1 ; ++k) {
    // This is the line that results in a segfault if OLD is defined
    MPI_Reduce(observables[k], obs_sum[k], n2, MPI_LONG, MPI_SUM, 0,
               MPI_COMM_WORLD);
}

即使 MPI_Reduce() 在非根进程上不会使用该缓冲区的内容,生成的代码也必须先读取 obs_sum(一个未定义、未分配的指针),加上偏移 k,再解引用以取得要传给 MPI_Reduce() 的指针——正是这一步解引用导致了段错误 (segfault)。
例如,仅保留分配指针数组的那一行,就足以使程序不再崩溃:
#else // Correct version
      // We define all the pointers in all the processes.
      obs_sum = (long **) malloc(n2 * sizeof(long *));
      // NOTE(review): the claim is that the row allocations below can be
      // skipped on non-root ranks because MPI never dereferences recvbuf
      // there; on the root, however, obs_sum[k] must still point to valid
      // memory for the reduction result — verify before removing them there.
      // try commenting out the following lines
      // if (world_rank == 0) {
      //   for (int k = 0 ; k < n2 ; ++k) {
      //     obs_sum[k] = (long *) calloc(n2, sizeof(long));
      //   }
      // }
#endif

我会把二维数组分配成一个扁平的一维数组——我真的不喜欢指针数组这种二维表示。像下面这样不是更好吗?
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

/*
 * Flat-array version: one contiguous n1 x n2 buffer per rank, reduced
 * in a single MPI_Reduce call. Element (k, i) lives at index k*n2 + i.
 */
int main(int argc, char **argv) {
    // MPI initialization
    int world_rank, world_size;
    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    int n1 = 3, n2 = 10; // Sizes of the 2d arrays

    // Per-rank input data, stored row-major in one allocation.
    long *observables = malloc(n1 * n2 * sizeof *observables);
    for (int k = 0 ; k < n1 ; ++k) {
        for (long i = 0 ; i < n2 ; ++i) {
            observables[k*n2+i] = k * i * world_rank; // Whatever
        }
    }

    // recvbuf only has to be a valid buffer on the root; on the other
    // ranks MPI ignores the pointer value, so NULL is fine there.
    // (Was `nullptr`, which is C++/C23-only — this is a C file.)
    long *obs_sum = NULL; // This will hold the sum on process 0
    if (world_rank == 0) {
        obs_sum = malloc(n1 * n2 * sizeof *obs_sum);
    }

    // One reduction over the whole flat buffer replaces the per-row loop.
    MPI_Reduce(observables, obs_sum, n1*n2, MPI_LONG, MPI_SUM, 0, MPI_COMM_WORLD);

    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();

    // Release the buffers (free(NULL) is a no-op on non-root ranks).
    free(observables);
    free(obs_sum);

    return 0;
}

07-28 02:56
查看更多