Parallel Merge Sort using MPI

Submitted by 笑着哭i on 2021-02-08 10:12:52

Question


I implemented a parallel merge sort in this code using a tree-structural scheme, but it doesn't sort the array! Could you take a look at it and tell me what is wrong? For communication among the processors I used the normal MPI_Send() and MPI_Recv(), with the numbers 0, 1 and 2 as tags for the fifth argument of MPI_Recv(). For 8 processors the tree-structural scheme gives the array to the processor with rank 0, which splits the array in half, gives the right half to processor 4 and keeps the left half. Processor 4 then splits its array in half, gives the right half to processor 6 and keeps the left half. In the end, with this scheme, all the processors take part in the work and none of them idles, since at the leaves of the tree every processor has a piece of the array on which to run the sequential merge_sort_inc().
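To make the splitting scheme concrete, here is a small standalone sketch (plain C, no MPI calls; the 8-process count comes from the description above, and the starting height of 3 is simply log2(8)) that prints which rank hands its right half to which child at each level, using the same bit arithmetic as parallelMerge() in the code below.

#include <stdio.h>

/* Standalone illustration of the tree-structural split (no MPI):
   for an assumed 8 processes and a starting height of 3, print which
   rank keeps the left half and which rank receives the right half at
   every level, using rightChild = rank | (1 << (height - 1)). */
int main(void)
{
    int numberOfproc = 8;   /* assumed process count           */
    int rootHeight = 3;     /* log2(8) levels above the leaves */

    for (int height = rootHeight; height > 0; height--)
    {
        printf("level with height %d:\n", height);
        for (int rank = 0; rank < numberOfproc; rank++)
        {
            /* a rank is active at this level only if its low `height` bits are 0 */
            if ((rank & ((1 << height) - 1)) != 0)
                continue;
            int rightChild = rank | (1 << (height - 1));
            printf("  rank %d keeps the left half and sends the right half to rank %d\n",
                   rank, rightChild);
        }
    }
    return 0;
}

For 8 processes this prints rank 0 sending to rank 4, then ranks 0 and 4 sending to ranks 2 and 6, and finally ranks 0, 2, 4 and 6 sending to ranks 1, 3, 5 and 7, which is exactly the scheme described above.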


#include<stdio.h>
#include<mpi.h>
#include<stdlib.h>
#include<time.h>
#include<math.h>

/* print_array() writes the elements of the array to standard output */

void print_array(int arr[], int size)
{
    for (int i = 0; i < size; i++)
        printf("%d ", arr[i]);
    printf("\n");
}

/* copyarray() copies the elements of its first argument a[] between the indexes
start_a and end_a into the dynamic array *b of size size_b */
void copyarray(int a[] ,int start_a , int end_a, int* b, int size_b)
{
    int i = 0;
    for (i = 0; i < size_b;i++)
    {
        b[i] = a[start_a];
        start_a++;
        if (start_a == end_a)
            break;
    }
}
/* merge() is the sequential merge step of the merge sort algorithm */
void merge(int Arr[], int left, int mid, int right)
{

    int n_l = (mid - left + 1);
    int n_r = (right - mid);
    int* Arr_l = (int*)calloc(n_l, sizeof(int));
    int* Arr_r = (int*)calloc(n_r, sizeof(int));
    if (Arr_l == NULL)
        return;
    if (Arr_r == NULL)
        return;

    for (int i = 0;i < n_l;i++)
        Arr_l[i] = Arr[left + i];

    for (int j = 0;j < n_r;j++)
        Arr_r[j] = Arr[mid + 1 + j];

    int i = 0, j = 0, k = left;
    while (i < n_l && j < n_r)
    {
        if (Arr_l[i] <= Arr_r[j])
        {
            Arr[k] = Arr_l[i];
            i++;
            k++;
        }
        else
        {
            Arr[k] = Arr_r[j];
            j++;
            k++;
        }
    }

    while (i < n_l)
    {
        Arr[k] = Arr_l[i];
        i++;
        k++;
    }

    while (j < n_r)
    {
        Arr[k] = Arr_r[j];
        j++;
        k++;
    }
    free(Arr_l);
    free(Arr_r);
}

/* merge_sort_inc() is the sequential merge sort, sorting in increasing order */
void merge_sort_inc(int Arr[], int left, int right)
{
    int mid = (int)(left + (right - left) / 2);
    if (left < right)
    {
        merge_sort_inc(Arr, left, mid);
        merge_sort_inc(Arr, mid + 1, right - 1);
        merge(Arr, left, mid, right);
    }

}
/* parallelMerge() first builds the tree-structural communication between the processors. At the leaves of the tree,
   where there is no further divide-and-conquer step, each processor runs the sequential merge sort. */
void parallelMerge(int* array, int size, int height)
{
    int parent;
    int rank;
    int numberOfproc;
    int next;
    int rightChild;
    //MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numberOfproc);
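    /* Tree bookkeeping: clearing bit `height` of the rank gives the parent,
       setting bit (height - 1) gives the right child for the next level down. */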

    parent = rank & ~(1 << height);
    next = height - 1;
    rightChild = rank | (1 << (height - 1));

    if (height > 0)
        if (rightChild >= numberOfproc)
            parallelMerge(array, size, next);
        else
        {
            int left_size = (int)(size / 2);
            int right_size = size - left_size;
            int* leftArray = (int*)calloc(left_size, sizeof(int));
            int * rightArray = (int*)calloc(right_size,sizeof(int));
            if (leftArray == NULL)
                return;
            if (rightArray == NULL)
                return;

            int massage[2];
            int i, j , k;
            MPI_Status status;
            copyarray(array, 0, left_size, leftArray, left_size);
            copyarray(array, size - left_size, size, rightArray,right_size);
            massage[0] = next;
            massage[1] = right_size;
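            /* Hand the right half to the right child: tag 0 carries the
               (height, size) header, tag 1 carries the right-half data, and
               tag 2 (received below) brings back the sorted right half. */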
            MPI_Send(massage, 2, MPI_INT, rightChild,0, MPI_COMM_WORLD);
            MPI_Send(rightArray, right_size, MPI_INT, rightChild, 1, MPI_COMM_WORLD);

            parallelMerge(leftArray, left_size, next);
            MPI_Recv(rightArray, right_size, MPI_INT, rightChild, 2, MPI_COMM_WORLD, &status);


            i = j = k = 0;
            while (i < left_size && j < right_size)
            {
                if (leftArray[i] < rightArray[j])
                {
                    array[k] = leftArray[i]; i++, k++;
                }
                else
                {
                    array[k] = rightArray[j]; j++, k++;
                }
            }
            while (i<left_size)
            {
                array[k] = leftArray[i];
                k++;
                i++;
            }

            while (j<right_size)
            {
                array[k] = rightArray[j];
                k++;
                j++;
            }
        }
    else
    {
        merge_sort_inc(array, 0 ,size);
        if (parent != rank)
            MPI_Send(array, size, MPI_INT, parent, 2, MPI_COMM_WORLD);
    }

}


/////////////////////////////////////////////////////////////////////////////////////////////
int main()
{
    /*building an array with the help of Random function*/
    time_t t;
    srand((unsigned)time(&t));
    int Arr[100];
    int arrSize = sizeof(Arr) / sizeof(int);
    for (int i = 0; i < arrSize; i++)
        Arr[i] = rand() / 100;
    printf("the unsorted array is : \n ");
    print_array(Arr, arrSize);

    /*starting the parallel sorting*/ 
    int rank; 
    int comm_size;
    MPI_Init(NULL,NULL);
    MPI_Comm_rank(MPI_COMM_WORLD , &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
    double start = MPI_Wtime();//capture time
    if (rank == 0)
    {
        int roothight = 0; 
        int nodeCount = 1;
        while (nodeCount < comm_size)
            nodeCount++;

        roothight = (int)log(nodeCount);

        int* newarray = (int*)calloc(arrSize, sizeof(int));
        if (newarray == NULL)
            return 1;
        copyarray(Arr, 0, arrSize - 1, newarray, arrSize );


        parallelMerge(newarray, arrSize, roothight);
        double midle = MPI_Wtime();
    }
    else
    {
        int massage[2];
        int height;
        int size_array;
        MPI_Status status;
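        /* Non-root ranks first receive the (height, size) header with tag 0,
           then the subarray itself with tag 1, from their parent in the tree. */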
        MPI_Recv(massage, 2, MPI_INT, MPI_ANY_SOURCE,0, MPI_COMM_WORLD, &status);
        height = massage[0];
        size_array = massage[1];
        int* newarray = (int*)calloc(size_array, sizeof(int));
        if (newarray == NULL)
            return 1;
        MPI_Recv(newarray, size_array, MPI_INT, MPI_ANY_SOURCE,1, MPI_COMM_WORLD, &status);
        parallelMerge(newarray, size_array, height);
    }

    double end = MPI_Wtime();
    MPI_Finalize();
    printf("\n the sorted array is : \n");
    print_array(Arr, arrSize);
    printf("\n the sorting takes %lf time ", (end - start));
    return 0;
}

Source: https://stackoverflow.com/questions/61910607/parallel-merge-sort-using-mpi
