Parallel Array Addition on MPI
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    int size, rank, i, *a, *b, *c, N;
    double t1, t2;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    srand(time(NULL));
    if (rank == 0)
    {
        printf("Input an integer for arrays size\n");
        scanf("%d", &N);
        a = (int*) malloc(N * sizeof(int));
        b = (int*) malloc(N * sizeof(int));
        c = (int*) malloc(N * sizeof(int));
        for (i = 0; i < N; i++)
        {
            a[i] = rand() % 100 + 1;
            b[i] = rand() % 100 + 1;
            c[i] = 0;
            printf("c[%d]= %d , a[i]= %d , b[i] = %d \n", i, c[i], a[i], b[i]);
        }
    }
    t1 = MPI_Wtime();
    MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&a, N, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&b, N, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&c, N, MPI_INT, 0, MPI_COMM_WORLD);
    printf("\n\n\n\n");
    for (i = rank; i < N; i += size)
    {
        printf("entered rank %d \n", rank);
        c[i] = a[i] + b[i];
        printf("c[%d]= %d , a[i]= %d , b[i] = %d \n", i, c[i], a[i], b[i]);
    }
    t2 = MPI_Wtime();
    if (rank == 0)
        printf("time elapsed %.8f \n", t2 - t1);
    MPI_Finalize();
}
I want to write a parallel program with the MPI library. When I run this code with just one core there is no problem, but with more than one core I get a runtime error like the one shown below. Rank 0 works very well; the other ranks do not work.
Solution 1:[1]
You need to malloc() the arrays a, b and c on all the ranks; MPI_Bcast() does not allocate them for you on non-root ranks.
From a performance and memory-footprint point of view, you would be better off distributing the arrays with MPI_Scatterv(), so that each rank stores only its own slice and the additions can be vectorized without strided access.
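A minimal sketch of the first point (my own illustration, not part of the original answer; the size N = 8 is just a placeholder): every rank allocates its own buffers before the broadcasts, and the buffers themselves (a, not &a) are passed to MPI_Bcast(), which the question's code also gets wrong.

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int size, rank, i, N = 0;
    int *a, *b, *c;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0)
        N = 8;                                      /* placeholder size; the root decides N */
    MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);   /* N is a scalar, so &N is correct here */

    /* every rank allocates its own buffers -- MPI_Bcast() does not do that for you */
    a = malloc(N * sizeof(int));
    b = malloc(N * sizeof(int));
    c = malloc(N * sizeof(int));

    if (rank == 0)                                  /* only the root fills the input data */
        for (i = 0; i < N; i++) { a[i] = i + 1; b[i] = i + 1; }

    /* pass the buffers themselves (a, b), not the addresses of the pointers (&a, &b) */
    MPI_Bcast(a, N, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(b, N, MPI_INT, 0, MPI_COMM_WORLD);

    for (i = rank; i < N; i += size)                /* same strided split as in the question */
        c[i] = a[i] + b[i];

    if (rank == 0)
        printf("rank 0 computed c[0] = %d\n", c[0]);

    free(a); free(b); free(c);
    MPI_Finalize();
    return 0;
}

With this broadcast-based scheme every rank still holds full copies of a and b, and the partial sums in c stay scattered across the ranks; that is why MPI_Scatterv() plus MPI_Gatherv(), as used in Solution 2 below, is the better approach.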
Solution 2:[2]
I am a beginner, but maybe you could use this:
#include <iostream>
#include <mpi.h>

#define MASTER 0

int main(int argc, char* argv[])
{
    int N = 0;                // array size
    int my_rank, world_size;
    double* a{};
    double* b{};
    double* c{};

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    if (my_rank == MASTER) {
        std::cout << "number of processes = " << world_size << std::endl;
    }

    // only the master reads N and allocates the full arrays
    if (my_rank == MASTER) {
        std::cout << "array size: "; std::cin >> N;
        a = new double[N] {};
        b = new double[N] {};
        c = new double[N] {};
    }
    MPI_Bcast(&N, 1, MPI_INT, MASTER, MPI_COMM_WORLD);

    if (my_rank == MASTER) {
        for (int i = 0; i < N; i++) {
            a[i] = (double)i + 1.0;
            b[i] = (double)i + 1.0;
        }
        for (auto i = 0; i < N; i++) {
            std::cout << "a[" << i << "] = " << a[i] <<
                "\t" << "b[" << i << "] = " << b[i] <<
                std::endl;
        }
        std::cout << "============================" << std::endl;
    }

    // split the N elements as evenly as possible across the ranks
    int* workloads = new int[world_size];
    int* displs = new int[world_size];
    int increment = 0;
    for (int i = 0; i < world_size; i++) {
        displs[i] = increment;
        workloads[i] = N / world_size;
        if (i < N % world_size) workloads[i]++;
        increment += workloads[i];
    }

    // report the workloads one rank at a time
    bool nowPrint = false;
    for (int j = 0; j < world_size; j++) {
        if (j == my_rank)
            std::cout << "workload on rank #" << my_rank << " : " << workloads[my_rank] << std::endl;
        if (j == world_size - 1) {
            nowPrint = true;
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }

    // each rank works on its own slice ap, bp, cp
    int n = workloads[my_rank];
    double* ap(new double[n] {});
    double* bp(new double[n] {});
    double* cp(new double[n] {});

    MPI_Scatterv(a, workloads, displs, MPI_DOUBLE, ap, n, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Scatterv(b, workloads, displs, MPI_DOUBLE, bp, n, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    for (auto i = 0; i < n; i++) {
        cp[i] = ap[i] + bp[i];
    }

    MPI_Gatherv(cp, n, MPI_DOUBLE, c, workloads, displs, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    if (my_rank == MASTER) {
        if (nowPrint) {
            std::cout << "============================\n";
            for (auto i = 0; i < N; i++) {
                std::cout << "c[" << i << "] = " << c[i] << std::endl;
            }
        }
    }

    // free heap on the root process
    if (my_rank == MASTER) {
        delete[] a;
        a = nullptr;
        delete[] b;
        b = nullptr;
        delete[] c;
        c = nullptr;
    }

    // free heap for all processes
    delete[] ap;
    ap = nullptr;
    delete[] bp;
    bp = nullptr;
    delete[] cp;
    cp = nullptr;
    delete[] workloads;
    workloads = nullptr;
    delete[] displs;
    displs = nullptr;

    MPI_Finalize();
    return 0;
}
Sources
This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.
Source: Stack Overflow
| Solution | Source |
|---|---|
| Solution 1 | Gilles Gouaillardet |
| Solution 2 | Peter Csala |
