HyPar  1.0
Finite-Difference Hyperbolic-Parabolic PDE Solver on Cartesian Grids
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
MPIGatherArraynD.c File Reference

Gather an n-dimensional array into a global array. More...

#include <stdio.h>
#include <stdlib.h>
#include <basic.h>
#include <arrayfunctions.h>
#include <mpivars.h>

Go to the source code of this file.

Functions

int MPIGatherArraynD (int ndims, void *m, double *xg, double *x, int *dim_global, int *dim_local, int ghosts, int nvars)
 
int MPIGatherArraynDwGhosts (int ndims, void *m, double *xg, double *x, int *dim_global, int *dim_local, int ghosts, int nvars)
 

Detailed Description

Gather an n-dimensional array into a global array.

Author
Debojyoti Ghosh

Definition in file MPIGatherArraynD.c.

Function Documentation

int MPIGatherArraynD ( int  ndims,
void *  m,
double *  xg,
double *  x,
int *  dim_global,
int *  dim_local,
int  ghosts,
int  nvars 
)

Gathers the contents of an n-dimensional array, partitioned amongst the MPI ranks, into a global array on rank 0. See documentation of MPIExchangeBoundariesnD() for how the n-dimensional array is stored in the memory as a single-index array.

Notes:

  • The global array must have no ghost points.
  • The global array must be allocated only on rank 0. On other ranks, it must be NULL.
Parameters
ndimsNumber of spatial dimensions
mMPI object of type MPIVariables
xgGlobal array (preallocated) without ghost points
xLocal array
dim_globalInteger array with elements as global size along each spatial dimension
dim_localInteger array with elements as local size along each spatial dimension
ghostsNumber of ghost points
nvarsNumber of variables (vector components)

Definition at line 21 of file MPIGatherArraynD.c.

31 {
32  MPIVariables *mpi = (MPIVariables*) m;
33  int d, size;
35 
36  int is[ndims], ie[ndims], index[ndims], bounds[ndims];
37 
38  /* xg should be non-null only on root */
39  if (mpi->rank && xg) {
40  fprintf(stderr,"Error in MPIGatherArraynD(): global array exists on non-root processors (rank %d).\n",
41  mpi->rank);
42  return(1);
43  }
44  if ((!mpi->rank) && (!xg)) {
45  fprintf(stderr,"Error in MPIGatherArraynD(): global array is not allocated on root processor.\n");
46  return(1);
47  }
48 
49  /* calculate total size of local domain (w/o ghosts) */
50  size = 1;
51  for (d = 0; d < ndims; d++) size *= dim_local[d];
52 
53  /* create and copy data to send to root process */
54  double *buffer = (double*) calloc (size*nvars, sizeof(double));
55  IERR ArrayCopynD(ndims,x,buffer,dim_local,ghosts,0,index,nvars); CHECKERR(ierr);
56 
57  if (!mpi->rank) {
58  int proc;
59  for (proc = 0; proc < mpi->nproc; proc++) {
60  int d,done,size;
61  /* Find out the domain limits for each process */
62  IERR MPILocalDomainLimits(ndims,proc,mpi,dim_global,is,ie); CHECKERR(ierr);
63  size = 1;
64  for (d=0; d<ndims; d++) {
65  size *= (ie[d]-is[d]);
66  bounds[d] = ie[d] - is[d];
67  }
68  if (proc) {
69 #ifndef serial
70  MPI_Status status;
71  double *recvbuf = (double*) calloc (size*nvars, sizeof(double));
72  MPI_Recv(recvbuf,size*nvars,MPI_DOUBLE,proc,1902,mpi->world,&status);
73  int done = 0; _ArraySetValue_(index,ndims,0);
74  while (!done) {
75  int p1; _ArrayIndex1D_(ndims,bounds,index,0,p1);
76  int p2; _ArrayIndex1DWO_(ndims,dim_global,index,is,0,p2);
77  _ArrayCopy1D_((recvbuf+nvars*p1),(xg+nvars*p2),nvars);
78  _ArrayIncrementIndex_(ndims,bounds,index,done);
79  }
80  free(recvbuf);
81 #endif
82  } else {
83  done = 0; _ArraySetValue_(index,ndims,0);
84  while (!done) {
85  int p1; _ArrayIndex1D_(ndims,bounds,index,0,p1);
86  int p2; _ArrayIndex1DWO_(ndims,dim_global,index,is,0,p2);
87  _ArrayCopy1D_((buffer+nvars*p1),(xg+nvars*p2),nvars);
88  _ArrayIncrementIndex_(ndims,bounds,index,done);
89  }
90  }
91  }
92 
93  } else {
94 #ifndef serial
95  /* Meanwhile, on other processes */
96  MPI_Send(buffer,size*nvars,MPI_DOUBLE,0,1902,mpi->world);
97 #endif
98  }
99 
100  free(buffer);
101  return(0);
102 }
#define _ArraySetValue_(x, size, value)
#define _ArrayIncrementIndex_(N, imax, i, done)
INLINE int ArrayCopynD(int, const double *, double *, int *, int, int, int *, int)
#define _ArrayIndex1D_(N, imax, i, ghost, index)
MPI_Comm world
#define _ArrayIndex1DWO_(N, imax, i, offset, ghost, index)
#define _ArrayCopy1D_(x, y, size)
#define CHECKERR(ierr)
Definition: basic.h:18
int MPILocalDomainLimits(int, int, void *, int *, int *, int *)
#define IERR
Definition: basic.h:16
Structure of MPI-related variables.
#define _DECLARE_IERR_
Definition: basic.h:17
int MPIGatherArraynDwGhosts ( int  ndims,
void *  m,
double *  xg,
double *  x,
int *  dim_global,
int *  dim_local,
int  ghosts,
int  nvars 
)

Gathers the contents of an n-dimensional array, partitioned amongst the MPI ranks, into a global array on rank 0. See documentation of MPIExchangeBoundariesnD() for how the n-dimensional array is stored in the memory as a single-index array. This is same as MPIGatherArraynD() but where the global array has ghost points.

Notes:

  • The global array must have ghost points (the same number as the local arrays).
  • The global array must be allocated only on rank 0. On other ranks, it must be NULL.
Parameters
ndimsNumber of spatial dimensions
mMPI object of type MPIVariables
xgGlobal array (preallocated) with ghost points
xLocal array
dim_globalInteger array with elements as global size along each spatial dimension
dim_localInteger array with elements as local size along each spatial dimension
ghostsNumber of ghost points
nvarsNumber of variables (vector components)

Definition at line 115 of file MPIGatherArraynD.c.

125 {
126  MPIVariables *mpi = (MPIVariables*) m;
127  int d, size;
129 
130  /* do an exchange to make sure interior/periodic ghost points are filled with consistent values */
131  IERR MPIExchangeBoundariesnD(ndims, nvars, dim_local, ghosts, mpi, x);
132 
133  int is[ndims], ie[ndims], index[ndims], bounds[ndims];
134  int dim_global_wghosts[ndims];
135  for (d = 0; d < ndims; d++) dim_global_wghosts[d] = dim_global[d] + 2*ghosts;
136 
137  /* xg should be non-null only on root */
138  if (mpi->rank && xg) {
139  fprintf(stderr,"Error in MPIGatherArraynD(): global array exists on non-root processors (rank %d).\n",
140  mpi->rank);
141  return(1);
142  }
143  if ((!mpi->rank) && (!xg)) {
144  fprintf(stderr,"Error in MPIGatherArraynD(): global array is not allocated on root processor.\n");
145  return(1);
146  }
147 
148  /* calculate total size of local domain (w/ ghosts) */
149  size = 1;
150  for (d = 0; d < ndims; d++) size *= (dim_local[d]+2*ghosts);
151 
152  /* create and copy data (incl. ghost points) to send to root process */
153  double *buffer = (double*) calloc (size*nvars, sizeof(double));
154  _ArrayCopy1D_(x, buffer, size*nvars);
155 
156  if (!mpi->rank) {
157  int proc;
158  for (proc = 0; proc < mpi->nproc; proc++) {
159  int d,done;
160  /* Find out the domain limits for each process */
161  IERR MPILocalDomainLimits(ndims,proc,mpi,dim_global,is,ie); CHECKERR(ierr);
162  for (d=0; d<ndims; d++) {
163  bounds[d] = ie[d] - is[d];
164  }
165  if (proc) {
166 #ifndef serial
167  int size = 1;
168  for (d=0; d<ndims; d++) {
169  size *= (ie[d]-is[d]+2*ghosts);
170  bounds[d] = ie[d] - is[d];
171  }
172  MPI_Status status;
173  double *recvbuf = (double*) calloc (size*nvars, sizeof(double));
174  MPI_Recv(recvbuf,size*nvars,MPI_DOUBLE,proc,1902,mpi->world,&status);
175  int done = 0; _ArraySetValue_(index,ndims,0);
176  while (!done) {
177  int p1; _ArrayIndex1D_(ndims,bounds,index,ghosts,p1);
178  int p2; _ArrayIndex1DWO_(ndims,dim_global,index,is,ghosts,p2);
179  _ArrayCopy1D_((recvbuf+nvars*p1),(xg+nvars*p2),nvars);
180  _ArrayIncrementIndex_(ndims,bounds,index,done);
181  }
182  free(recvbuf);
183 #endif
184  } else {
185  done = 0; _ArraySetValue_(index,ndims,0);
186  while (!done) {
187  int p1; _ArrayIndex1D_(ndims,bounds,index,ghosts,p1);
188  int p2; _ArrayIndex1DWO_(ndims,dim_global,index,is,ghosts,p2);
189  _ArrayCopy1D_((buffer+nvars*p1),(xg+nvars*p2),nvars);
190  _ArrayIncrementIndex_(ndims,bounds,index,done);
191  }
192  }
193  }
194 
195  } else {
196 #ifndef serial
197  /* Meanwhile, on other processes */
198  MPI_Send(buffer,size*nvars,MPI_DOUBLE,0,1902,mpi->world);
199 #endif
200  }
201 
202  free(buffer);
203  return(0);
204 }
#define _ArraySetValue_(x, size, value)
#define _ArrayIncrementIndex_(N, imax, i, done)
#define _ArrayIndex1D_(N, imax, i, ghost, index)
MPI_Comm world
#define _ArrayIndex1DWO_(N, imax, i, offset, ghost, index)
#define _ArrayCopy1D_(x, y, size)
#define CHECKERR(ierr)
Definition: basic.h:18
int MPILocalDomainLimits(int, int, void *, int *, int *, int *)
#define IERR
Definition: basic.h:16
int MPIExchangeBoundariesnD(int, int, int *, int, void *, double *)
Structure of MPI-related variables.
#define _DECLARE_IERR_
Definition: basic.h:17