MPI Sending array of array

Submitted by 那年仲夏 on 2019-12-07 14:06:44

Question


OK, so I am trying to send a structure like this over MPI:

struct BColumns {
        double **B;
        int offset;
};

And if I just do some quick-and-dirty allocation of data like so:

    bSet.offset = myRank;
    bSet.B = (double **) calloc(2, sizeof(double *));
    bSet.B[0] = (double *) calloc(1, sizeof(double));
    bSet.B[1] = (double *) calloc(1, sizeof(double));

    bSet.B[0][0] = 1;
    bSet.B[1][0] = 2;

    /* recvBuf (another struct BColumns) and status (an MPI_Status)
       are assumed to be declared elsewhere */
    if (myRank == 0) {
        MPI_Send(&bSet, sizeof(struct BColumns), MPI_BYTE, 1, 1, MPI_COMM_WORLD);
    } else {
        MPI_Recv(&recvBuf, sizeof(struct BColumns), MPI_BYTE, MPI_ANY_SOURCE,
                 MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    }

And I am assuming that this isn't going to work well, because if I send this structure as-is it will just send the pointer value in B, and that pointer doesn't point to anything on the other processor. So how would I go about sending data like this in MPI?


Answer 1:


As suszterpatt points out, you really want to allocate your block of B in one big chunk; that's probably better for performance anyway, and it's really required for any communication, so you're not chasing pointers everywhere. I think one way or another you're probably going to have to do it in separate sends -- sending the size information first, then the data in one chunk -- although you could create and delete a different MPI_Type_struct for every one of these you send. But using multiple sends per object isn't very hard:

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

typedef struct BColumns {
        double **B;
        int offset;
} bc;

/* Allocate an n x m array as one contiguous block of n*m doubles, plus
 * an array of row pointers into that block; the contiguous layout is
 * what lets us send all the data in a single MPI call. */
double **alloc2d(int n, int m) {
    double *data = malloc(n*m*sizeof(double));
    double **array = malloc(n*sizeof(double *));
    for (int i=0; i<n; i++) {
        array[i] = &(data[i*m]);
    }
    return array;
}

/* Free both halves of an alloc2d array. */
void free2d(double **array) {
    free(array[0]);   /* array[0] points at the start of the data block */
    free(array);
}

int main(int argc, char **argv) {

    const int tag = 13;
    int size, rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (size < 2) {
        fprintf(stderr,"Requires at least two processes.\n");
        MPI_Abort(MPI_COMM_WORLD, 1);   /* cleaner than exit() inside MPI */
    }

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0) {
        int ncols=3, colsize=5;
        bc *send;

        send = malloc(sizeof(bc));
        send->offset = 1;
        send->B = alloc2d(ncols, colsize);
        for (int i=0; i<ncols; i++)
            for (int j=0; j<colsize; j++)
                send->B[i][j] = i*j;

        const int dest = 1;
        /* Send the dimensions first so the receiver knows how much to
         * allocate, then the offset, then all the data in one chunk. */
        MPI_Send(&ncols,   1, MPI_INT, dest, tag, MPI_COMM_WORLD);
        MPI_Send(&colsize, 1, MPI_INT, dest, tag, MPI_COMM_WORLD);
        MPI_Send(&(send->offset), 1, MPI_INT, dest, tag, MPI_COMM_WORLD);
        MPI_Send(&(send->B[0][0]), ncols*colsize, MPI_DOUBLE, dest, tag,
                 MPI_COMM_WORLD);


        printf("Rank %d: sent structure B\n", rank);
        free2d(send->B);
        free(send);
    }

    if (rank == 1) {
        MPI_Status status;
        const int src=0;
        int rncols, rcolsize;
        bc *recv;

        /* Receive the dimensions first... */
        MPI_Recv(&rncols,   1, MPI_INT, src, tag, MPI_COMM_WORLD, &status);
        MPI_Recv(&rcolsize, 1, MPI_INT, src, tag, MPI_COMM_WORLD, &status);
        printf("Rank %d: Received: rncols = %d rcolsize=%d\n", rank, rncols, rcolsize);

        /* ...then allocate a matching contiguous block and receive into it. */
        recv = malloc(sizeof(bc));
        recv->B = alloc2d(rncols, rcolsize);

        MPI_Recv(&(recv->offset), 1, MPI_INT, src, tag, MPI_COMM_WORLD, &status);
        MPI_Recv(&(recv->B[0][0]), rncols*rcolsize, MPI_DOUBLE, src, tag,
                MPI_COMM_WORLD, &status);

        printf("Rank %d: Received: offset = %d\n", rank, recv->offset);
        for (int i=0; i<rncols; i++) {
            printf("%d:  Column %d/%d: ", rank, i, rncols);
            for (int j=0; j<rcolsize; j++)
                printf(" %lf ", recv->B[i][j]);
            printf("\n");
        }

        free2d(recv->B);
        free(recv);
    }

    MPI_Finalize();

    return 0;
}

And then running it:

$ mpirun -np 3 ./bstruct
Rank 0: sent structure B
Rank 1: Received: rncols = 3 rcolsize=5
Rank 1: Received: offset = 1
1:  Column 0/3:  0.000000  0.000000  0.000000  0.000000  0.000000 
1:  Column 1/3:  0.000000  1.000000  2.000000  3.000000  4.000000 
1:  Column 2/3:  0.000000  2.000000  4.000000  6.000000  8.000000 

You could marshal that data into one message if you wanted to avoid the latency of several sends (and if you knew beforehand a maximum size for the B array), either by hand or using MPI function calls or datatypes, but you'd still have to do it in much the same way.
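For instance, here is a rough sketch of the by-hand marshalling variant using MPI_Pack/MPI_Unpack, reusing the bc struct and the alloc2d helper from the code above; send_bc and recv_bc are just names made up for this example, and error checking is omitted:

/* Sender: pack the dimensions, the offset, and the contiguous data
 * block (as laid out by alloc2d) into one buffer, then one send. */
void send_bc(bc *send, int ncols, int colsize, int dest, int tag) {
    int intsz, dblsz, pos = 0;
    MPI_Pack_size(3, MPI_INT, MPI_COMM_WORLD, &intsz);
    MPI_Pack_size(ncols*colsize, MPI_DOUBLE, MPI_COMM_WORLD, &dblsz);

    int packsize = intsz + dblsz;
    char *buf = malloc(packsize);

    MPI_Pack(&ncols,   1, MPI_INT, buf, packsize, &pos, MPI_COMM_WORLD);
    MPI_Pack(&colsize, 1, MPI_INT, buf, packsize, &pos, MPI_COMM_WORLD);
    MPI_Pack(&(send->offset), 1, MPI_INT, buf, packsize, &pos, MPI_COMM_WORLD);
    MPI_Pack(&(send->B[0][0]), ncols*colsize, MPI_DOUBLE,
             buf, packsize, &pos, MPI_COMM_WORLD);

    MPI_Send(buf, pos, MPI_PACKED, dest, tag, MPI_COMM_WORLD);
    free(buf);
}

/* Receiver: needs an upper bound on the message size up front. */
void recv_bc(bc *recv, int *ncols, int *colsize,
             int maxbytes, int src, int tag) {
    MPI_Status status;
    char *buf = malloc(maxbytes);
    int pos = 0;

    MPI_Recv(buf, maxbytes, MPI_PACKED, src, tag, MPI_COMM_WORLD, &status);

    MPI_Unpack(buf, maxbytes, &pos, ncols,   1, MPI_INT, MPI_COMM_WORLD);
    MPI_Unpack(buf, maxbytes, &pos, colsize, 1, MPI_INT, MPI_COMM_WORLD);
    MPI_Unpack(buf, maxbytes, &pos, &(recv->offset), 1, MPI_INT,
               MPI_COMM_WORLD);

    recv->B = alloc2d(*ncols, *colsize);
    MPI_Unpack(buf, maxbytes, &pos, &(recv->B[0][0]), (*ncols)*(*colsize),
               MPI_DOUBLE, MPI_COMM_WORLD);
    free(buf);
}

The tradeoff is one message instead of four, at the cost of an extra copy through the pack buffer on each side.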




Answer 2:


The easiest way is to use a single array to store your values in row/column-major order, so that it's all contiguous in memory. Then you just need to define an MPI datatype that describes the struct's memory layout (lots of doubles and an int).
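As a rough sketch of what that could look like, assuming a maximum size known at compile time (FlatBC and make_flatbc_type are hypothetical names for this example, not anything from the question):

#include <stddef.h>
#include <mpi.h>

enum { MAXCOLS = 3, COLSIZE = 5 };

/* Flattened version of BColumns: one fixed-size contiguous block of
 * doubles (column i lives at B[i*COLSIZE]) plus the int, so a single
 * derived datatype can describe the whole struct. */
struct FlatBC {
    double B[MAXCOLS * COLSIZE];
    int offset;
};

MPI_Datatype make_flatbc_type(void) {
    MPI_Datatype newtype;
    int          blocklens[2] = { MAXCOLS * COLSIZE, 1 };
    MPI_Aint     displs[2]    = { offsetof(struct FlatBC, B),
                                  offsetof(struct FlatBC, offset) };
    MPI_Datatype types[2]     = { MPI_DOUBLE, MPI_INT };

    MPI_Type_create_struct(2, blocklens, displs, types, &newtype);
    MPI_Type_commit(&newtype);
    return newtype;   /* release with MPI_Type_free when done */
}

/* usage: one send/recv then moves the whole struct
 *   MPI_Datatype bctype = make_flatbc_type();
 *   struct FlatBC x;
 *   MPI_Send(&x, 1, bctype, dest, tag, MPI_COMM_WORLD);
 * (to send arrays of FlatBC you'd also want MPI_Type_create_resized
 * to account for any trailing padding) */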



Source: https://stackoverflow.com/questions/9507987/mpi-sending-array-of-array
