I'm writing a program in C that performs matrix multiplication in parallel using MPI. I'm fairly new to C and MPI, so this is fairly rough code. I can't seem to get it working, so could someone read through it and help me understand what I need to do to fix it?
Here is the code:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>

// code adapted from source codes from
//  http://www.programiz.com/c-programming/c-multi-dimensional-arrays
//  http://www.cs.hofstra.edu/~cscccl/csc145/imul.c


// GENERAL VARIABLES
int **A, **B, **AB;
int i,j,k;
int rows_A, cols_A, rows_B, cols_B;
int dimensions[3];

// MATRIX MULTIPLICATION
void matrixMult(int start, int interval){
for (i = start; i < start+interval; ++i){
    for (j = 0; j < cols_B; ++j){
        for (k = 0; k < cols_A; ++k)
            AB[i][j] += (A[i][k] * B[k][j]);}}}


int main(int argc, char *argv[]){
// MPI VARIABLES, INITIALIZE MPI
int rank, size, interval, remainder;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);

if (rank == 0){

    // READ AND WRITE MATRICES ------------------------------------
    FILE *matrix1, *matrix2;
    matrix1 = fopen("matrix1", "r");
    fscanf(matrix1, "%d", &rows_A);
    fscanf(matrix1, "%d", &cols_A);

    matrix2 = fopen("matrix2", "r");
    fscanf(matrix2, "%d", &rows_B);
    fscanf(matrix2, "%d", &cols_B);

    int dimensions[3] = {rows_A, cols_A, cols_B};

    /*printf("\n\nRows A = %d",rows_A);
    printf("\nCols A = %d",cols_A);
    printf("\n\nRows B = %d",rows_B);
    printf("\nCols B = %d",cols_B);*/

    // Allocate memory for matrices
    int **A = malloc(rows_A * sizeof(int*));
    // The cast to size_t prevents integer overflow with big matrices
    A[0] = malloc((size_t)rows_A * (size_t)cols_A * sizeof(int));
    for(i = 1; i < rows_A; i++)
            A[i] = A[0] + i*cols_A;

    int **B = malloc(rows_B * sizeof(int*));
    // The cast to size_t prevents integer overflow with big matrices
    B[0] = malloc((size_t)rows_B * (size_t)cols_B * sizeof(int));
    for(i = 1; i < rows_A; i++)
            B[i] = B[0] + i*cols_B;

    int **AB = malloc(rows_A * sizeof(int*));
    // The cast to size_t prevents integer overflow with big matrices
    AB[0] = malloc((size_t)rows_A * (size_t)cols_B * sizeof(int));
    for(i = 1; i < rows_A; i++)
            AB[i] = AB[0] + i*cols_B;


    /*int **A = (int **)malloc(rows_A * sizeof(int*));
    for(i = 0; i < rows_A; i++)
        A[i] = (int *)malloc(cols_A * sizeof(int));

    int **B = (int **)malloc(rows_B * sizeof(int*));
    for(i = 0; i < rows_B; i++)
        B[i] = (int *)malloc(cols_B * sizeof(int));

    int **AB = (int **)malloc(rows_A * sizeof(int*));
    for(i = 0; i < rows_B; i++)
        AB[i] = (int *)malloc(cols_B * sizeof(int));*/


    // Write matrices
    while(!feof(matrix1)){
        for(i=0;i<rows_A;i++){
            for(j=0;j<cols_A;j++)
                fscanf(matrix1,"%d",&A[i][j]);}}

    while(!feof(matrix2)){
    for(i=0;i<rows_B;i++){
        for(j=0;j<cols_B;j++)
            fscanf(matrix2,"%d",&B[i][j]);}}

    /*
    // Print Matrices
    printf("\n\n");
    //print matrix 1
    printf("Matrix A:\n");
    for(i=0;i<rows_A;i++){
        for(j=0;j<cols_A;j++)
            printf("%d\t",A[i][j]);
        printf("\n");}
    printf("\n");
    //print matrix 2
    printf("Matrix B:\n");
    for(i=0;i<rows_B;i++){
        for(j=0;j<cols_B;j++)
            printf("%d\t",B[i][j]);
        printf("\n");} */
    // ------------------------------------------------------------------




    // MULTIPLICATION (Parallelize here)

    printf("begin rank 0\n");

    interval = rows_A / size; // work per processor
    remainder = rows_A % size;

    // SEND B BROADCAST to all
    MPI_Bcast(B, rows_B * cols_B, MPI_INT, 0, MPI_COMM_WORLD);
    printf("1\n");
    // SEND A, ROWS, COLS, interval to each rank
    for(i=1;i<size;i++)
        MPI_Send(dimensions,3,MPI_INT,i,123,MPI_COMM_WORLD);
    printf("2\n");
    for(i=1;i<size;i++)
        MPI_Send(A[i*interval],interval*rows_A,MPI_INT,i,123,MPI_COMM_WORLD);
    printf("3\n");

    // ROOT MM
    matrixMult(0, interval);
    printf("3.5\n");
    matrixMult(size * interval, remainder);
    printf("4\n");

    // receive AB from workers, add to current AB
    for(i=1;i<size;i++)
        MPI_Recv(AB[i*interval],interval*rows_A,MPI_INT,i,123,MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    printf("5\n");





    // PRINT MATRIX PRODUCT
    printf("\nSum Of Matrix:\n");
    for(i = 0; i < rows_A; ++i){
        for(j = 0; j < cols_B; ++j){
            printf("%d\t",AB[i][j]);
            if(j == cols_B - 1)/* To display matrix sum in order. */
                printf("\n");}}

    // CLOSE FILES
    fclose(matrix1);
    fclose(matrix2);



}

else{ // WORKER NODES
    printf("bring workers\n");
    // RECEIVE B BROADCAST
    MPI_Bcast(B, rows_B * cols_B, MPI_INT, 0, MPI_COMM_WORLD);
    printf("a\n");
    // RECEIVE A, INTERVAL
    MPI_Recv(dimensions,3,MPI_INT,0,123, MPI_COMM_WORLD,MPI_STATUS_IGNORE);
    printf("b\n");
    rows_A = dimensions[0];
    cols_A = dimensions[1];
    cols_B = dimensions[2];
    printf("c\n");
    MPI_Recv(A[rank*interval],interval*rows_A,MPI_INT,0,123, MPI_COMM_WORLD,MPI_STATUS_IGNORE);
    printf("d\n");

    // WORKER MM
    matrixMult(rank*interval, interval);
    printf("e\n");

    // send AB to root
    MPI_Send(AB[rank*interval],interval*rows_A,MPI_INT,0,123,MPI_COMM_WORLD);
    printf("f\n");
}

// FINALIZE MPI
MPI_Finalize();  /* EXIT MPI */

}
I inserted some print statements to try to figure out where my code breaks down, and it looks like it gets into the actual matrix multiplication in both the workers and the rank 0 root. Does that mean something is wrong with my receives? The input is a 2x3 matrix containing 1 2 3 4 5 6 and a 3x2 matrix containing 7 8 9 10 11 12, and here is what the output looks like:
hjiang1@cook:~/cs287/PMatrixMultiply$ make
mpicc parallelMatrixMult.c -std=c99 -lm -o parallelMatrix.out
hjiang1@cook:~/cs287/PMatrixMultiply$ mpirun --hostfile QuaCS parallelMatrix.out
No protocol specified
No protocol specified
bring workers
a
bring workers
a
bring workers
a
begin rank 0
1
2
b
c
b
c
b
c
3
d
e
d
3.5
[cook:06730] *** Process received signal ***
[cook:06730] Signal: Segmentation fault (11)
[cook:06730] Signal code: Address not mapped (1)
[cook:06730] Failing at address: 0xffffffffbbc4d600
[cook:06728] *** Process received signal ***
[cook:06728] Signal: Segmentation fault (11)
[cook:06728] Signal code: Address not mapped (1)
[cook:06728] Failing at address: 0x5d99f200
[cook:06727] *** Process received signal ***
[cook:06730] [ 0] /lib/x86_64-linux-gnu/libpthread.so.0(+0xfcb0)[0x7fdaa80eccb0]
[cook:06730] [ 1] [cook:06728] [ 0] /lib/x86_64-linux-gnu/libc.so.6(+0x147b55)[0x7fdaa7e65b55]
[cook:06730] [ 2] /usr/local/lib/openmpi/mca_btl_vader.so(+0x23f9)[0x7fda9e70f3f9]
[cook:06730] [ 3] /usr/local/lib/openmpi/mca_pml_ob1.so(mca_pml_ob1_send_request_start_rndv+0x1d3)[0x7fda9e0df393]
[cook:06730] [ 4] /usr/local/lib/openmpi/mca_pml_ob1.so(mca_pml_ob1_send+0x754)[0x7fda9e0d5404]
[cook:06730] [ 5] /lib/x86_64-linux-gnu/libpthread.so.0(+0xfcb0)[0x7f910bef2cb0]
[cook:06728] [ 1] parallelMatrix.out[0x400bad]
[cook:06728] [ 2] parallelMatrix.out[0x401448]
[cook:06728] [ 3] /lib/x86_64-linux-gnu/libc.so.6(__libc_start_main+0xed)[0x7f910bb4576d]
[cook:06728] [ 4] parallelMatrix.out[0x400a79]
[cook:06728] *** End of error message ***
/usr/local/lib/libmpi.so.1(PMPI_Send+0xf2)[0x7fdaa8368332]
[cook:06730] [ 6] parallelMatrix.out[0x401492]
[cook:06730] [ 7] /lib/x86_64-linux-gnu/libc.so.6(__libc_start_main+0xed)[0x7fdaa7d3f76d]
[cook:06730] [ 8] parallelMatrix.out[0x400a79]
[cook:06730] *** End of error message ***
[cook:06727] Signal: Segmentation fault (11)
[cook:06727] Signal code: Address not mapped (1)
[cook:06727] Failing at address: (nil)
[cook:06727] [ 0] /lib/x86_64-linux-gnu/libpthread.so.0(+0xfcb0)[0x7f73e0d09cb0]
[cook:06727] [ 1] parallelMatrix.out[0x400bad]
[cook:06727] [ 2] [cook:6729] *** An error occurred in MPI_Recv
[cook:6729] *** reported by process [1864040449,2]
[cook:6729] *** on communicator MPI_COMM_WORLD
[cook:6729] *** MPI_ERR_COUNT: invalid count argument
[cook:6729] *** MPI_ERRORS_ARE_FATAL (processes in this communicator will now abort,
[cook:6729] ***    and potentially your MPI job)

I'd really appreciate any help. Again, I'm new to C and MPI, so please bear with how bad the code is.

Best Answer

I see the same mistake repeated over and over again. When working with MPI, use flat arrays: allocate each matrix as one contiguous block of memory rather than allocating each row separately. In other words, instead of:

int **A = (int **)malloc(rows_A * sizeof(int*));
for(i = 0; i < rows_A; i++)
    A[i] = (int *)malloc(cols_A * sizeof(int));

you should use:
int **A = malloc(rows_A * sizeof(int*));
// The cast to size_t prevents integer overflow with big matrices
A[0] = malloc((size_t)rows_A * (size_t)cols_A * sizeof(int));
for(i = 1; i < rows_A; i++)
    A[i] = A[0] + i*cols_A;

Freeing such a matrix is as simple as:
free(A[0]);
free(A);
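
A practical payoff of the contiguous layout is that MPI can then treat any run of consecutive rows as one flat buffer. As a minimal sketch (not a drop-in fix; it mirrors your distribution loop and assumes the allocation above), the root could send each worker its block of rows like this, with the element count expressed as rows times row length:

// Sketch: root distributes `interval` consecutive rows of A to each
// worker. A[dest*interval] is the address of the first row of the
// block, and the count is interval*cols_A, since each of the interval
// rows holds cols_A ints and the rows are adjacent in memory.
for (int dest = 1; dest < size; dest++)
    MPI_Send(A[dest * interval], interval * cols_A, MPI_INT,
             dest, 123, MPI_COMM_WORLD);

This only works because row i+1 starts exactly where row i ends; with per-row malloc the rows could be anywhere in memory.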

That said, there is another class of error in your code:
MPI_Recv(A+(i*interval), ...);
MPI_Send(A+(i*interval), ...);

A is an array of pointers to the rows. A + i is a pointer to the i-th element of that array, so what you are passing to MPI is not the actual address of the row data in memory but the address of a pointer to that data. The correct expression (given that you have allocated the memory in a single block as outlined earlier) is:
MPI_Recv(A[i*interval], ...);


or, equivalently:

MPI_Recv(*(A + i*interval), ...);

In other words, array[index] is equivalent to *(array + index), not to array + index.
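
To see the distinction concretely, here is a small standalone program (no MPI needed, purely illustrative): A + i points into the array of row pointers, while A[i] and *(A + i) point at the row data itself.

#include <stdio.h>
#include <stdlib.h>

int main(void) {
    int rows = 3, cols = 4;

    // Contiguous allocation, as recommended above.
    int **A = malloc(rows * sizeof(int*));
    A[0] = malloc((size_t)rows * (size_t)cols * sizeof(int));
    for (int i = 1; i < rows; i++)
        A[i] = A[0] + i * cols;

    int i = 1;
    printf("A + i  = %p  (pointer into the row-pointer array)\n", (void *)(A + i));
    printf("A[i]   = %p  (address of the row data itself)\n", (void *)A[i]);
    printf("*(A+i) = %p  (identical to A[i])\n", (void *)*(A + i));

    free(A[0]);
    free(A);
    return 0;
}

The same reasoning applies to your broadcast: MPI_Bcast(B, rows_B * cols_B, MPI_INT, 0, MPI_COMM_WORLD) passes the row-pointer array B where the data address B[0] belongs.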
