HyPar  1.0
Finite-Difference Hyperbolic-Parabolic PDE Solver on Cartesian Grids
mpivars.h File Reference

MPI-related function definitions.

#include <mpivars_struct.h>


Functions

int MPIBroadcast_double (double *, int, int, void *)
 
int MPIBroadcast_integer (int *, int, int, void *)
 
int MPIBroadcast_character (char *, int, int, void *)
 
int MPICreateCommunicators (int, void *)
 
int MPIFreeCommunicators (int, void *)
 
int MPICreateIOGroups (void *)
 
int MPIExchangeBoundaries1D (void *, double *, int, int, int, int)
 
int MPIExchangeBoundariesnD (int, int, int *, int, void *, double *)
 
int MPIGatherArray1D (void *, double *, double *, int, int, int, int)
 
int MPIGatherArraynD (int, void *, double *, double *, int *, int *, int, int)
 
int MPIGatherArraynDwGhosts (int, void *, double *, double *, int *, int *, int, int)
 
int MPIPartitionArraynD (int, void *, double *, double *, int *, int *, int, int)
 
int MPIPartitionArraynDwGhosts (int, void *, double *, double *, int *, int *, int, int)
 
int MPIPartitionArray1D (void *, double *, double *, int, int, int, int)
 
int MPIGetArrayDatanD (double *, double *, int *, int *, int *, int *, int, int, int, void *)
 
int MPILocalDomainLimits (int, int, void *, int *, int *, int *)
 
int MPIMax_integer (int *, int *, int, void *)
 
int MPIMax_long (long *, long *, int, void *)
 
int MPIMax_double (double *, double *, int, void *)
 
int MPIMin_integer (int *, int *, int, void *)
 
int MPIMin_double (double *, double *, int, void *)
 
int MPISum_double (double *, double *, int, void *)
 
int MPISum_integer (int *, int *, int, void *)
 
int MPIPartition1D (int, int, int)
 
int MPIRank1D (int, int *, int *)
 
int MPIRanknD (int, int, int *, int *)
 
void MPIGetFilename (char *, void *, char *)
 
int gpuMPIExchangeBoundariesnD (int, int, const int *, int, void *, double *)
 

Detailed Description

MPI-related function definitions.

Author
Debojyoti Ghosh

Definition in file mpivars.h.

Function Documentation

int MPIBroadcast_double(double *x, int size, int root, void *comm)

Broadcast a double to all ranks

Broadcast an array of type double to all MPI ranks

Parameters
x: array to broadcast to all ranks
size: size of array to broadcast
root: rank from which to broadcast
comm: MPI communicator within which to broadcast

Definition at line 9 of file MPIBroadcast.c.

15 {
16 #ifndef serial
17  MPI_Bcast(x,size,MPI_DOUBLE,root,*((MPI_Comm*)comm));
18 #endif
19  return(0);
20 }
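For illustration, a minimal usage sketch (the parameter array, its size, and the function name are hypothetical; the communicator is passed through the void* argument as a pointer to an MPI_Comm). The integer and character variants below are called in the same way:

#include <mpi.h>
#include <mpivars.h>

void broadcast_params(void)
{
  MPI_Comm world = MPI_COMM_WORLD;
  double params[4] = {0, 0, 0, 0};   /* hypothetical parameter array */
  int rank;
  MPI_Comm_rank(world, &rank);
  if (!rank) { params[0] = 1.0; params[1] = 0.5; }  /* root sets the values */
  MPIBroadcast_double(params, 4, 0, &world);        /* now all ranks have them */
}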
int MPIBroadcast_integer(int *x, int size, int root, void *comm)

Broadcast an integer to all ranks

Broadcast an array of type int to all MPI ranks

Parameters
x: array to broadcast to all ranks
size: size of array to broadcast
root: rank from which to broadcast
comm: MPI communicator within which to broadcast

Definition at line 23 of file MPIBroadcast.c.

29 {
30 #ifndef serial
31  MPI_Bcast(x,size,MPI_INT,root,*((MPI_Comm*)comm));
32 #endif
33  return(0);
34 }
int MPIBroadcast_character(char *x, int size, int root, void *comm)

Broadcast a character to all ranks

Broadcast an array of type char to all MPI ranks

Parameters
x: array to broadcast to all ranks
size: size of array to broadcast
root: rank from which to broadcast
comm: MPI communicator within which to broadcast

Definition at line 37 of file MPIBroadcast.c.

43 {
44 #ifndef serial
45  MPI_Bcast(x,size,MPI_CHAR,root,*((MPI_Comm*)comm));
46 #endif
47  return(0);
48 }
int MPICreateCommunicators(int ndims, void *m)

Create communicators required for the tridiagonal solver in compact schemes

Create subcommunicators from MPI_WORLD, where each subcommunicator contains MPI ranks along a spatial dimension. Consider a two-dimensional problem, partitioned on 21 MPI ranks as follows:

(Figure: mpi_ranks.png, showing the 21 MPI ranks arranged with 7 along dimension 0 and 3 along dimension 1.)

This function will create 10 subcommunicators with the following ranks:

  • 0,1,2,3,4,5,6
  • 7,8,9,10,11,12,13
  • 14,15,16,17,18,19,20
  • 0,7,14
  • 1,8,15
  • 2,9,16
  • 3,10,17
  • 4,11,18
  • 5,12,19
  • 6,13,20

These subcommunicators are useful for parallel computations along grid lines. For example, a compact finite-difference scheme solves implicit systems along grid lines in every spatial dimension; the appropriate subcommunicator, rather than MPI_WORLD, can then be passed to the parallel systems solver.

Parameters
ndims: Number of spatial dimensions
m: MPI object of type MPIVariables

Definition at line 35 of file MPICommunicators.c.

39 {
40  MPIVariables *mpi = (MPIVariables*) m;
41 #ifdef serial
42  mpi->comm = NULL;
43 #else
44  int i,n,color,key;
45  int *ip,*iproc;
46 
47  mpi->comm = (MPI_Comm*) calloc (ndims, sizeof(MPI_Comm));
48  if (ndims == 1) MPI_Comm_dup(mpi->world,mpi->comm);
49  else {
50  ip = (int*) calloc (ndims-1,sizeof(int));
51  iproc = (int*) calloc (ndims-1,sizeof(int));
52  for (n=0; n<ndims; n++) {
53  int tick=0;
54  for (i=0; i<ndims; i++) {
55  if (i != n) {
56  ip[tick] = mpi->ip[i];
57  iproc[tick] = mpi->iproc[i];
58  tick++;
59  }
60  }
61  _ArrayIndex1D_(ndims-1,iproc,ip,0,color);
62  key = mpi->ip[n];
63  MPI_Comm_split(mpi->world,color,key,&mpi->comm[n]);
64  }
65  free(ip);
66  free(iproc);
67  }
68 #endif
69  return(0);
70 }
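As an illustrative sketch of how these subcommunicators might be used (the reduction here stands in for any per-grid-line operation, such as the tridiagonal solve; the function and variable names are hypothetical):

#include <mpivars.h>

/* Sum a locally computed quantity over the ranks that share this
   rank's grid line along spatial dimension d only. */
double sum_along_dimension(MPIVariables *mpi, int d, double local_val)
{
  double line_sum = 0.0;
  /* mpi->comm[d] was created by MPICreateCommunicators() and contains
     only the ranks with the same position in all other dimensions */
  MPISum_double(&line_sum, &local_val, 1, &mpi->comm[d]);
  return line_sum;
}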
int MPIFreeCommunicators(int ndims, void *m)

Free/destroy communicators created

Free the subcommunicators created in MPICreateCommunicators().

Parameters
ndims: Number of spatial dimensions
m: MPI object of type MPIVariables

Definition at line 75 of file MPICommunicators.c.

79 {
80 #ifndef serial
81  MPIVariables *mpi = (MPIVariables*) m;
82  int n;
83  for (n=0; n<ndims; n++) MPI_Comm_free(&mpi->comm[n]);
84  free(mpi->comm);
85  if (mpi->IOParticipant) MPI_Comm_free(&mpi->IOWorld);
86  MPI_Comm_free(&mpi->world);
87 #endif
88  return(0);
89 }
int MPICreateIOGroups(void *m)

Create I/O groups for file reading and writing: group leaders gather data from all other ranks in the group and write it to file, and read from file and send the data to the appropriate ranks in the group. Thus, the number of ranks participating in file I/O is equal to the number of groups (which is an input) and can be set to the number of I/O nodes available.

Create I/O groups of MPI ranks: A scalable approach to file I/O when running simulations on a large number of processors (>10,000) is to partition all the MPI ranks into I/O groups. Each group has a "leader" that:

  • Input - reads the local data of each member rank from the file and sends it to that member.
  • Output - gets the local data of each member rank and writes it to a file.

The number of I/O groups (and hence, the number of I/O ranks reading and writing to files) is specified through MPIVariables::N_IORanks. Ideally, this would correspond to the number of I/O nodes available for the total number of compute nodes being used on an HPC platform.

Two extreme cases are:

  • Number of I/O ranks is 1, i.e., just one rank (typically rank 0) is responsible for reading and writing the local data of every rank.
  • Number of I/O ranks is equal to the total number of MPI ranks, i.e., each MPI rank reads from and writes to its own files.

Neither of these extreme cases is scalable.

Parameters
m: MPI object of type MPIVariables

Definition at line 37 of file MPIIOGroups.c.

38 {
39  MPIVariables *mpi = (MPIVariables*) m;
40 
41 #ifndef serial
42 
43  int nproc = mpi->nproc;
44  int rank = mpi->rank;
45  int N_IORanks = mpi->N_IORanks;
46 
47  int GroupSize;
48  if (nproc%N_IORanks==0) GroupSize = nproc/N_IORanks;
49  else {
50  if (!rank) {
51  printf("Note: in MPICreateIOGroups() - Number of ");
52  printf("ranks (nproc) is not divisible by number ");
53  printf("of IO ranks (N_IORanks). Readjusting number ");
54  printf("of IO ranks to fix this.\n");
55  }
56  N_IORanks = 1;
57  if (!rank) {
58  printf("Number of IO Ranks: %d\n",N_IORanks);
59  }
60  GroupSize = nproc;
61  }
62 
63  mpi->CommGroup = rank/GroupSize;
64  mpi->IORank = mpi->CommGroup * GroupSize;
65 
66  /* set flag for whether this rank does file I/O */
67  if (rank == mpi->IORank) mpi->IOParticipant = 1;
68  else mpi->IOParticipant = 0;
69 
70  /* save the first and last process of this group */
71  mpi->GroupStartRank = mpi->IORank;
72  mpi->GroupEndRank = (mpi->CommGroup+1)*GroupSize;
73 
74  /* create a new communicator with the IO participants */
75  int i,*FileIORanks;
76  MPI_Group WorldGroup, IOGroup;
77  FileIORanks = (int*) calloc (N_IORanks,sizeof(int));
78  for (i=0; i<N_IORanks; i++) FileIORanks[i] = i*GroupSize;
79  MPI_Comm_group(mpi->world,&WorldGroup);
80  MPI_Group_incl(WorldGroup,N_IORanks,FileIORanks,&IOGroup);
81  MPI_Comm_create(mpi->world,IOGroup,&mpi->IOWorld);
82  MPI_Group_free(&IOGroup);
83  MPI_Group_free(&WorldGroup);
84  free(FileIORanks);
85 
86 #endif
87 
88  return(0);
89 }
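A worked sketch of the group arithmetic above, for hypothetical values nproc = 16 and N_IORanks = 4 (the wrapper function name is also hypothetical):

void io_group_example(void)
{
  int nproc     = 16, N_IORanks = 4;
  int GroupSize = nproc/N_IORanks;       /* = 4 ranks per I/O group         */
  int rank      = 10;                    /* consider this rank              */
  int CommGroup = rank/GroupSize;        /* = 2: rank 10 is in group 2      */
  int IORank    = CommGroup*GroupSize;   /* = 8: rank 8 is the group leader */
  /* Group 2 spans ranks 8..11 (GroupStartRank = 8, GroupEndRank = 12),
     so rank 8 reads and writes the data of ranks 8, 9, 10, and 11. */
}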
int MPIExchangeBoundaries1D(void *m, double *x, int N, int ghosts, int dir, int ndims)

Exchange boundary (ghost point) values for an essentially 1D array (like grid coordinates)

Exchange the data across MPI ranks and fill in ghost points for a 1D array. In a multidimensional simulation, a 1D array is an array of data along one of the spatial dimensions, i.e., it is an array storing a variable that varies along only one of the spatial dimensions. For example, for a 2D problem on a Cartesian grid (with spatial dimensions x and y), the array of x-coordinates is a 1D array along x, and the array of y-coordinates is a 1D array along y. Thus, the size of the 1D array is equal to the size of the domain along the spatial dimension corresponding to that array.

Consider a two-dimensional problem, partitioned on 21 MPI ranks as follows:

(Figure: mpi_ranks.png, the same 7x3 rank layout shown above.)
Consider rank 9.

If the argument dir is specified as 0, and thus we are dealing with a 1D array along dimension 0, then

  • Rank 9 will exchange data with ranks 8 and 10, and fill in its ghost points.

If dir is specified as 1, and thus we are dealing with a 1D array along dimension 1, then

  • Rank 9 will exchange data with ranks 2 and 16, and fill in its ghost points.
Parameters
m: MPI object of type MPIVariables
x: The 1D array for which to exchange data
N: Size of the array
ghosts: Number of ghost points
dir: Spatial dimension corresponding to the 1D array
ndims: Number of spatial dimensions in the simulation

Definition at line 32 of file MPIExchangeBoundaries1D.c.

40 {
41 #ifndef serial
42  MPIVariables *mpi = (MPIVariables*) m;
43  int i;
44 
45  int *ip = mpi->ip;
46  int *iproc = mpi->iproc;
47  int non = 0; /* number of neighbours */
48 
49  int neighbor_rank[2] = {-1,-1};
50  int nip[ndims];
51 
52  /* each process has 2 neighbors (except at physical boundaries) */
53  /* calculate the rank of these neighbors (-1 -> none) */
54  _ArrayCopy1D_(ip,nip,ndims); nip[dir]--;
55  if (ip[dir] == 0) neighbor_rank[0] = -1;
56  else neighbor_rank[0] = MPIRank1D(ndims,iproc,nip);
57  _ArrayCopy1D_(ip,nip,ndims); nip[dir]++;
58  if (ip[dir] == (iproc[dir]-1))neighbor_rank[1] = -1;
59  else neighbor_rank[1] = MPIRank1D(ndims,iproc,nip);
60 
61  /* Allocate send and receive buffers */
62  double sendbuf[2][ghosts], recvbuf[2][ghosts];
63 
64  /* count number of neighbors and copy data to send buffers */
65  non = 0;
66  if (neighbor_rank[0] != -1) {
67  non++;
68  for (i = 0; i < ghosts; i++) sendbuf[0][i] = x[i+ghosts];
69  }
70  if (neighbor_rank[1] != -1) {
71  non++;
72  for (i = 0; i < ghosts; i++) sendbuf[1][i] = x[i+N];
73  }
74  MPI_Request requests[2*non];
75  MPI_Status statuses[2*non];
76 
77  /* exchange the data */
78  int tick = 0;
79  if (neighbor_rank[0]!= -1) {
80  MPI_Irecv(recvbuf[0],ghosts,MPI_DOUBLE,neighbor_rank[0],1631,mpi->world,&requests[tick]);
81  MPI_Isend(sendbuf[0],ghosts,MPI_DOUBLE,neighbor_rank[0],1631,mpi->world,&requests[tick+non]);
82  tick++;
83  }
84  if (neighbor_rank[1] != -1) {
85  MPI_Irecv(recvbuf[1],ghosts,MPI_DOUBLE,neighbor_rank[1],1631,mpi->world,&requests[tick]);
86  MPI_Isend(sendbuf[1],ghosts,MPI_DOUBLE,neighbor_rank[1],1631,mpi->world,&requests[tick+non]);
87  tick++;
88  }
89 
90  /* Wait till data transfer is done */
91  MPI_Waitall(2*non,requests,statuses);
92 
93  /* copy received data to ghost points */
94  if (neighbor_rank[0] != -1) for (i = 0; i < ghosts; i++) x[i] = recvbuf[0][i];
95  if (neighbor_rank[1] != -1) for (i = 0; i < ghosts; i++) x[i+N+ghosts] = recvbuf[1][i];
96 
97 #endif
98  return(0);
99 }
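A usage sketch for a 2D simulation, assuming the local x-coordinate array (dimension 0) has already been filled at its interior points (the array, size names, and wrapper function are hypothetical):

#include <mpivars.h>

void fill_xcoord_ghosts(MPIVariables *mpi, double *xcoord,
                        int N_local, int ghosts)
{
  /* xcoord has N_local interior points plus "ghosts" points on each
     side; dir = 0 selects spatial dimension 0, ndims = 2 for 2D */
  MPIExchangeBoundaries1D(mpi, xcoord, N_local, ghosts, 0, 2);
}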
int MPIExchangeBoundariesnD(int ndims, int nvars, int *dim, int ghosts, void *m, double *var)

Exchange boundary (ghost point) values for an n-dimensional array (like the solution array)

Exchange data across MPI ranks, and fill in ghost points for an n-dimensional array (where n is the total number of spatial dimensions). If any of the physical boundaries are periodic, this function also exchanges data and fills in the ghost points for these boundaries.

The n-dimensional array must be stored in memory as a single-index array, with the following order of mapping:

  • Number of variables (vector components)
  • Spatial dimension 0
  • Spatial dimension 1
  • ...
  • Spatial dimension ndims-1

For example, consider a 2D simulation (ndims = 2), of size \(7 \times 3\), with \(4\) vector components (nvars = 4). The following figure shows the layout (without the ghost points):

(Figure: layout.png, showing the element layout for this example.)

The bold numbers in parentheses represent the 2D indices. The numbers below them are the indices of the array that correspond to that 2D location. Thus, elements 40,41,42, and 43 in the array are the 1st, 2nd, 3rd, and 4th vector components at location (1,3).

If \({\bf i}\left[{\rm ndims}\right]\) is an integer array representing an n-dimensional index (for example, \(\left(5,4\right)\) in 2D, \(\left(3,5,2\right)\) in 3D), and the number of vector components is nvars, then:

  • _ArrayIndex1D_ computes the index \(p\) in the array corresponding to \({\bf i}\). In the above example, \({\bf i} = \left(1,3\right) \rightarrow p = 10\).
  • var[nvars*p+v] accesses the v-th component of the n-dimensional array var at location \({\bf i}\). In the above example, to access the 3rd vector component at location \(\left(1,3\right)\), we have \(p=10\), so var [4*10+2] = var [42].
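A sketch of this indexing (the header name arrayfunctions.h and the helper function are assumptions for illustration):

#include <arrayfunctions.h>   /* provides _ArrayIndex1D_ (header name assumed) */

/* Return the v-th vector component of var at n-dimensional location i,
   for an array with no ghost points. */
double get_component(const double *var, int *dim, int *i,
                     int ndims, int nvars, int v)
{
  int p;
  _ArrayIndex1D_(ndims, dim, i, 0, p);   /* 1D index of location i */
  return var[nvars*p + v];
}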
Parameters
ndims: Number of spatial dimensions
nvars: Number of variables (vector components) at each grid location
dim: Integer array whose elements are the local size along each spatial dimension
ghosts: Number of ghost points
m: MPI object of type MPIVariables
var: The array for which to exchange data and fill in ghost points

Definition at line 42 of file MPIExchangeBoundariesnD.c.

50 {
51 #ifndef serial
52  MPIVariables *mpi = (MPIVariables*) m;
53  int d;
54 
55  int *ip = mpi->ip;
56  int *iproc = mpi->iproc;
57  int *bcflag = mpi->bcperiodic;
58 
59  int neighbor_rank[2*ndims], nip[ndims], index[ndims], bounds[ndims], offset[ndims];
60  MPI_Request rcvreq[2*ndims], sndreq[2*ndims];
61  for (d=0; d<2*ndims; d++) rcvreq[d] = sndreq[d] = MPI_REQUEST_NULL;
62 
63  /* each process has 2*ndims neighbors (except at non-periodic physical boundaries) */
64  /* calculate the rank of these neighbors (-1 -> none) */
65  for (d = 0; d < ndims; d++) {
66  _ArrayCopy1D_(ip,nip,ndims);
67  if (ip[d] == 0) nip[d] = iproc[d]-1;
68  else nip[d]--;
69  if ((ip[d] == 0) && (!bcflag[d])) neighbor_rank[2*d] = -1;
70  else neighbor_rank[2*d] = MPIRank1D(ndims,iproc,nip);
71  _ArrayCopy1D_(ip,nip,ndims);
72  if (ip[d] == (iproc[d]-1)) nip[d] = 0;
73  else nip[d]++;
74  if ((ip[d] == (iproc[d]-1)) && (!bcflag[d])) neighbor_rank[2*d+1] = -1;
75  else neighbor_rank[2*d+1] = MPIRank1D(ndims,iproc,nip);
76  }
77 
78  /* calculate dimensions of each of the send-receive regions */
79  double *sendbuf = mpi->sendbuf;
80  double *recvbuf = mpi->recvbuf;
81  int stride = mpi->maxbuf;
82  int bufdim[ndims];
83  for (d = 0; d < ndims; d++) {
84  bufdim[d] = 1;
85  int i;
86  for (i = 0; i < ndims; i++) {
87  if (i == d) bufdim[d] *= ghosts;
88  else bufdim[d] *= dim[i];
89  }
90  }
91 
92  /* post the receive requests */
93  for (d = 0; d < ndims; d++) {
94  if (neighbor_rank[2*d ] != -1) {
95  MPI_Irecv(&recvbuf[2*d*stride],bufdim[d]*nvars,MPI_DOUBLE,neighbor_rank[2*d ],1630,
96  mpi->world,&rcvreq[2*d]);
97  }
98  if (neighbor_rank[2*d+1] != -1) {
99  MPI_Irecv(&recvbuf[(2*d+1)*stride],bufdim[d]*nvars,MPI_DOUBLE,neighbor_rank[2*d+1],1631,
100  mpi->world,&rcvreq[2*d+1]);
101  }
102  }
103 
104  /* count number of neighbors and copy data to send buffers */
105  for (d = 0; d < ndims; d++) {
106  _ArrayCopy1D_(dim,bounds,ndims); bounds[d] = ghosts;
107  if (neighbor_rank[2*d] != -1) {
108  _ArraySetValue_(offset,ndims,0);
109  int done = 0; _ArraySetValue_(index,ndims,0);
110  while (!done) {
111  int p1; _ArrayIndex1DWO_(ndims,dim,index,offset,ghosts,p1);
112  int p2; _ArrayIndex1D_(ndims,bounds,index,0,p2);
113  _ArrayCopy1D_((var+nvars*p1),(sendbuf+2*d*stride+nvars*p2),nvars);
114  _ArrayIncrementIndex_(ndims,bounds,index,done);
115  }
116  }
117  if (neighbor_rank[2*d+1] != -1) {
118  _ArraySetValue_(offset,ndims,0);offset[d] = dim[d]-ghosts;
119  int done = 0; _ArraySetValue_(index,ndims,0);
120  while (!done) {
121  int p1; _ArrayIndex1DWO_(ndims,dim,index,offset,ghosts,p1);
122  int p2; _ArrayIndex1D_(ndims,bounds,index,0,p2);
123  _ArrayCopy1D_((var+nvars*p1),(sendbuf+(2*d+1)*stride+nvars*p2),nvars);
124  _ArrayIncrementIndex_(ndims,bounds,index,done);
125  }
126  }
127  }
128 
129  /* send the data */
130  for (d = 0; d < ndims; d++) {
131  if (neighbor_rank[2*d ] != -1) {
132  MPI_Isend(&sendbuf[2*d*stride],bufdim[d]*nvars,MPI_DOUBLE,neighbor_rank[2*d ],1631,
133  mpi->world,&sndreq[2*d]);
134  }
135  if (neighbor_rank[2*d+1] != -1) {
136  MPI_Isend(&sendbuf[(2*d+1)*stride],bufdim[d]*nvars,MPI_DOUBLE,neighbor_rank[2*d+1],1630,
137  mpi->world,&sndreq[2*d+1]);
138  }
139  }
140 
141  /* Wait till data is done received */
142  MPI_Status status_arr[2*ndims];
143  MPI_Waitall(2*ndims,rcvreq,status_arr);
144  /* copy received data to ghost points */
145  for (d = 0; d < ndims; d++) {
146  _ArrayCopy1D_(dim,bounds,ndims); bounds[d] = ghosts;
147  if (neighbor_rank[2*d] != -1) {
148  _ArraySetValue_(offset,ndims,0); offset[d] = -ghosts;
149  int done = 0; _ArraySetValue_(index,ndims,0);
150  while (!done) {
151  int p1; _ArrayIndex1DWO_(ndims,dim,index,offset,ghosts,p1);
152  int p2; _ArrayIndex1D_(ndims,bounds,index,0,p2);
153  _ArrayCopy1D_((recvbuf+2*d*stride+nvars*p2),(var+nvars*p1),nvars);
154  _ArrayIncrementIndex_(ndims,bounds,index,done);
155  }
156  }
157  if (neighbor_rank[2*d+1] != -1) {
158  _ArraySetValue_(offset,ndims,0); offset[d] = dim[d];
159  int done = 0; _ArraySetValue_(index,ndims,0);
160  while (!done) {
161  int p1; _ArrayIndex1DWO_(ndims,dim,index,offset,ghosts,p1);
162  int p2; _ArrayIndex1D_(ndims,bounds,index,0,p2);
163  _ArrayCopy1D_((recvbuf+(2*d+1)*stride+nvars*p2),(var+nvars*p1),nvars);
164  _ArrayIncrementIndex_(ndims,bounds,index,done);
165  }
166  }
167  }
168  /* Wait till send requests are complete before freeing memory */
169  MPI_Waitall(2*ndims,sndreq,status_arr);
170 
171 #endif
172  return(0);
173 }
int MPIGatherArray1D(void *m, double *xg, double *x, int istart, int iend, int N_local, int ghosts)

Gather local arrays into a global array for an essentially 1D array

Gathers the contents of a 1D array (partitioned amongst MPI ranks) into a global 1D array on the root rank (rank 0). See documentation of MPIExchangeBoundaries1D() on what a "1D array" is in the context of a multidimensional simulation. The 1D array must be the same along spatial dimensions normal to the one it represents.

Notes:

  • The global array must not have ghost points.
  • The global array must be preallocated on only rank 0. On other ranks, it must be NULL.
  • Since this function deals with a 1D array, more than one rank may send the same piece of data to rank 0 (i.e., if there is more than one MPI rank along the dimensions normal to the one corresponding to x). The implementation of this function ignores this and overwrites that portion with the most recently received data.
Parameters
m: MPI object of type MPIVariables
xg: Global 1D array (must be preallocated) without ghost points
x: Local 1D array to be gathered
istart: Starting index (global) of this rank's portion of the array
iend: Ending index (global) of this rank's portion of the array + 1
N_local: Local size of the array
ghosts: Number of ghost points

Definition at line 26 of file MPIGatherArray1D.c.

35 {
36  MPIVariables *mpi = (MPIVariables*) m;
37  int ierr = 0;
38 
39  /* xg should be non-null only on root */
40  if (mpi->rank && xg) {
41  fprintf(stderr,"Error in MPIGatherArray1D(): global array exists on non-root processors (rank %d).\n",
42  mpi->rank);
43  ierr = 1;
44  }
45  if ((!mpi->rank) && (!xg)) {
46  fprintf(stderr,"Error in MPIGatherArray1D(): global array is not allocated on root processor.\n");
47  ierr = 1;
48  }
49 
50  /* create and copy data to a buffer to send to the root process */
51  double *buffer = (double*) calloc (N_local,sizeof(double));
52  _ArrayCopy1D_((x+ghosts),(buffer),N_local);
53 
54  if (!mpi->rank) {
55 #ifndef serial
56  MPI_Status status;
57 #endif
58  int proc;
59  for (proc = 0; proc < mpi->nproc; proc++) {
60  /* Find out the domain limits for each process */
61  int is,ie;
62  if (proc) {
63 #ifndef serial
64  MPI_Recv(&is,1,MPI_INT,proc,1442,mpi->world,&status);
65  MPI_Recv(&ie,1,MPI_INT,proc,1443,mpi->world,&status);
66 #endif
67  } else { is = istart; ie = iend; }
68  int size = ie - is;
69  if (proc) {
70 #ifndef serial
71  double *recvbuf = (double*) calloc (size,sizeof(double));
72  MPI_Recv(recvbuf,size,MPI_DOUBLE,proc,1916,mpi->world,&status);
73  _ArrayCopy1D_((recvbuf),(xg+is),size);
74  free(recvbuf);
75 #endif
76  } else _ArrayCopy1D_(buffer,(xg+is),size);
77  }
78 
79  } else {
80 #ifndef serial
81  /* Meanwhile, on other processes - send stuff to root */
82  MPI_Send(&istart,1 ,MPI_INT ,0,1442,mpi->world);
83  MPI_Send(&iend ,1 ,MPI_INT ,0,1443,mpi->world);
84  MPI_Send(buffer ,N_local,MPI_DOUBLE,0,1916,mpi->world);
85 #endif
86  }
87 
88  free(buffer);
89  return(ierr);
90 }
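A usage sketch (the array names and wrapper function are hypothetical): gather the local x-coordinate arrays onto rank 0, which is the only rank that allocates the global array:

#include <stdlib.h>
#include <mpivars.h>

void gather_xcoord(MPIVariables *mpi, double *x_local, int N_local,
                   int is, int ie, int ghosts, int N_global)
{
  double *x_global = NULL;
  if (!mpi->rank) x_global = (double*) calloc(N_global, sizeof(double));
  MPIGatherArray1D(mpi, x_global, x_local, is, ie, N_local, ghosts);
  /* ... rank 0 now holds the full coordinate array and can write it ... */
  if (x_global) free(x_global);
}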
int MPIGatherArraynD(int ndims, void *m, double *xg, double *x, int *dim_global, int *dim_local, int ghosts, int nvars)

Gather local arrays into a global array for an n-dimensional array

Gathers the contents of an n-dimensional array, partitioned amongst the MPI ranks, into a global array on rank 0. See the documentation of MPIExchangeBoundariesnD() for how the n-dimensional array is stored in memory as a single-index array.

Notes:

  • The global array must have no ghost points.
  • The global array must be allocated only on rank 0. On other ranks, it must be NULL.
Parameters
ndims: Number of spatial dimensions
m: MPI object of type MPIVariables
xg: Global array (preallocated) without ghost points
x: Local array
dim_global: Integer array with elements as global size along each spatial dimension
dim_local: Integer array with elements as local size along each spatial dimension
ghosts: Number of ghost points
nvars: Number of variables (vector components)

Definition at line 21 of file MPIGatherArraynD.c.

31 {
32  MPIVariables *mpi = (MPIVariables*) m;
33  int d, size;
35 
36  int is[ndims], ie[ndims], index[ndims], bounds[ndims];
37 
38  /* xg should be non-null only on root */
39  if (mpi->rank && xg) {
40  fprintf(stderr,"Error in MPIGatherArraynD(): global array exists on non-root processors (rank %d).\n",
41  mpi->rank);
42  return(1);
43  }
44  if ((!mpi->rank) && (!xg)) {
45  fprintf(stderr,"Error in MPIGatherArraynD(): global array is not allocated on root processor.\n");
46  return(1);
47  }
48 
49  /* calculate total size of local domain (w/o ghosts) */
50  size = 1;
51  for (d = 0; d < ndims; d++) size *= dim_local[d];
52 
53  /* create and copy data to send to root process */
54  double *buffer = (double*) calloc (size*nvars, sizeof(double));
55  IERR ArrayCopynD(ndims,x,buffer,dim_local,ghosts,0,index,nvars); CHECKERR(ierr);
56 
57  if (!mpi->rank) {
58  int proc;
59  for (proc = 0; proc < mpi->nproc; proc++) {
60  int d,done,size;
61  /* Find out the domain limits for each process */
62  IERR MPILocalDomainLimits(ndims,proc,mpi,dim_global,is,ie); CHECKERR(ierr);
63  size = 1;
64  for (d=0; d<ndims; d++) {
65  size *= (ie[d]-is[d]);
66  bounds[d] = ie[d] - is[d];
67  }
68  if (proc) {
69 #ifndef serial
70  MPI_Status status;
71  double *recvbuf = (double*) calloc (size*nvars, sizeof(double));
72  MPI_Recv(recvbuf,size*nvars,MPI_DOUBLE,proc,1902,mpi->world,&status);
73  int done = 0; _ArraySetValue_(index,ndims,0);
74  while (!done) {
75  int p1; _ArrayIndex1D_(ndims,bounds,index,0,p1);
76  int p2; _ArrayIndex1DWO_(ndims,dim_global,index,is,0,p2);
77  _ArrayCopy1D_((recvbuf+nvars*p1),(xg+nvars*p2),nvars);
78  _ArrayIncrementIndex_(ndims,bounds,index,done);
79  }
80  free(recvbuf);
81 #endif
82  } else {
83  done = 0; _ArraySetValue_(index,ndims,0);
84  while (!done) {
85  int p1; _ArrayIndex1D_(ndims,bounds,index,0,p1);
86  int p2; _ArrayIndex1DWO_(ndims,dim_global,index,is,0,p2);
87  _ArrayCopy1D_((buffer+nvars*p1),(xg+nvars*p2),nvars);
88  _ArrayIncrementIndex_(ndims,bounds,index,done);
89  }
90  }
91  }
92 
93  } else {
94 #ifndef serial
95  /* Meanwhile, on other processes */
96  MPI_Send(buffer,size*nvars,MPI_DOUBLE,0,1902,mpi->world);
97 #endif
98  }
99 
100  free(buffer);
101  return(0);
102 }
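A usage sketch (names are hypothetical): gather the local solution arrays into a global array on rank 0, e.g., before writing a solution file:

#include <stdlib.h>
#include <mpivars.h>

void gather_solution(MPIVariables *mpi, double *u_local,
                     int *dim_global, int *dim_local,
                     int ndims, int ghosts, int nvars)
{
  double *u_global = NULL;
  if (!mpi->rank) {
    int d, size = nvars;
    for (d = 0; d < ndims; d++) size *= dim_global[d];
    u_global = (double*) calloc(size, sizeof(double));  /* no ghost points */
  }
  MPIGatherArraynD(ndims, mpi, u_global, u_local,
                   dim_global, dim_local, ghosts, nvars);
  /* ... rank 0 writes u_global ... */
  if (u_global) free(u_global);
}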
int MPIGatherArraynDwGhosts(int ndims, void *m, double *xg, double *x, int *dim_global, int *dim_local, int ghosts, int nvars)

Gather local arrays into a global array for an n-dimensional array (with ghosts)

Gathers the contents of an n-dimensional array, partitioned amongst the MPI ranks, into a global array on rank 0. See the documentation of MPIExchangeBoundariesnD() for how the n-dimensional array is stored in memory as a single-index array. This is the same as MPIGatherArraynD(), except that the global array has ghost points.

Notes:

  • The global array must have ghost points (the same number as the local arrays).
  • The global array must be allocated only on rank 0. On other ranks, it must be NULL.
Parameters
ndims: Number of spatial dimensions
m: MPI object of type MPIVariables
xg: Global array (preallocated) with ghost points
x: Local array
dim_global: Integer array with elements as global size along each spatial dimension
dim_local: Integer array with elements as local size along each spatial dimension
ghosts: Number of ghost points
nvars: Number of variables (vector components)

Definition at line 115 of file MPIGatherArraynD.c.

125 {
126  MPIVariables *mpi = (MPIVariables*) m;
127  int d, size;
129 
130  /* do an exchange to make sure interior/periodic ghost points are filled with consistent values */
131  IERR MPIExchangeBoundariesnD(ndims, nvars, dim_local, ghosts, mpi, x);
132 
133  int is[ndims], ie[ndims], index[ndims], bounds[ndims];
134  int dim_global_wghosts[ndims];
135  for (d = 0; d < ndims; d++) dim_global_wghosts[d] = dim_global[d] + 2*ghosts;
136 
137  /* xg should be non-null only on root */
138  if (mpi->rank && xg) {
139  fprintf(stderr,"Error in MPIGatherArraynD(): global array exists on non-root processors (rank %d).\n",
140  mpi->rank);
141  return(1);
142  }
143  if ((!mpi->rank) && (!xg)) {
144  fprintf(stderr,"Error in MPIGatherArraynD(): global array is not allocated on root processor.\n");
145  return(1);
146  }
147 
148  /* calculate total size of local domain (w/ ghosts) */
149  size = 1;
150  for (d = 0; d < ndims; d++) size *= (dim_local[d]+2*ghosts);
151 
152  /* create and copy data (incl. ghost points) to send to root process */
153  double *buffer = (double*) calloc (size*nvars, sizeof(double));
154  _ArrayCopy1D_(x, buffer, size*nvars);
155 
156  if (!mpi->rank) {
157  int proc;
158  for (proc = 0; proc < mpi->nproc; proc++) {
159  int d,done;
160  /* Find out the domain limits for each process */
161  IERR MPILocalDomainLimits(ndims,proc,mpi,dim_global,is,ie); CHECKERR(ierr);
162  for (d=0; d<ndims; d++) {
163  bounds[d] = ie[d] - is[d];
164  }
165  if (proc) {
166 #ifndef serial
167  int size = 1;
168  for (d=0; d<ndims; d++) {
169  size *= (ie[d]-is[d]+2*ghosts);
170  bounds[d] = ie[d] - is[d];
171  }
172  MPI_Status status;
173  double *recvbuf = (double*) calloc (size*nvars, sizeof(double));
174  MPI_Recv(recvbuf,size*nvars,MPI_DOUBLE,proc,1902,mpi->world,&status);
175  int done = 0; _ArraySetValue_(index,ndims,0);
176  while (!done) {
177  int p1; _ArrayIndex1D_(ndims,bounds,index,ghosts,p1);
178  int p2; _ArrayIndex1DWO_(ndims,dim_global,index,is,ghosts,p2);
179  _ArrayCopy1D_((recvbuf+nvars*p1),(xg+nvars*p2),nvars);
180  _ArrayIncrementIndex_(ndims,bounds,index,done);
181  }
182  free(recvbuf);
183 #endif
184  } else {
185  done = 0; _ArraySetValue_(index,ndims,0);
186  while (!done) {
187  int p1; _ArrayIndex1D_(ndims,bounds,index,ghosts,p1);
188  int p2; _ArrayIndex1DWO_(ndims,dim_global,index,is,ghosts,p2);
189  _ArrayCopy1D_((buffer+nvars*p1),(xg+nvars*p2),nvars);
190  _ArrayIncrementIndex_(ndims,bounds,index,done);
191  }
192  }
193  }
194 
195  } else {
196 #ifndef serial
197  /* Meanwhile, on other processes */
198  MPI_Send(buffer,size*nvars,MPI_DOUBLE,0,1902,mpi->world);
199 #endif
200  }
201 
202  free(buffer);
203  return(0);
204 }
int MPIPartitionArraynD(int ndims, void *m, double *xg, double *x, int *dim_global, int *dim_local, int ghosts, int nvars)

Partition a global array into local arrays for an n-dimensional array

Partitions the contents of a global n-dimensional array on rank 0 (root) to local n-dimensional arrays on all the MPI ranks. See the documentation of MPIExchangeBoundariesnD() for how the n-dimensional array is stored in memory as a single-index array.

Notes:

  • The global array must have no ghost points.
  • The global array must be allocated only on rank 0. On other ranks, it must be NULL.
Parameters
ndims: Number of spatial dimensions
m: MPI object of type MPIVariables
xg: Global array (preallocated) without ghost points
x: Local array
dim_global: Integer array with elements as global size along each spatial dimension
dim_local: Integer array with elements as local size along each spatial dimension
ghosts: Number of ghost points
nvars: Number of variables (vector components)

Definition at line 21 of file MPIPartitionArraynD.c.

31 {
32  MPIVariables *mpi = (MPIVariables*) m;
34 
35  int is[ndims], ie[ndims], index[ndims], bounds[ndims];
36 
37  /* xg should be non-null only on root */
38  if (mpi->rank && xg) {
39  fprintf(stderr,"Error in MPIPartitionArraynD(): global array exists on non-root processors (rank %d).\n",
40  mpi->rank);
41  return(1);
42  }
43  if ((!mpi->rank) && (!xg)) {
44  fprintf(stderr,"Error in MPIPartitionArraynD(): global array is not allocated on root processor.\n");
45  return(1);
46  }
47 
48  if (!mpi->rank) {
49  int proc;
50  for (proc = 0; proc < mpi->nproc; proc++) {
51  int d,done,size;
52  /* Find out the domain limits for each process */
53  IERR MPILocalDomainLimits(ndims,proc,mpi,dim_global,is,ie); CHECKERR(ierr);
54  size = 1;
55  for (d=0; d<ndims; d++) {
56  size *= (ie[d]-is[d]);
57  bounds[d] = ie[d] - is[d];
58  }
59  double *buffer = (double*) calloc (size*nvars, sizeof(double));
60  done = 0; _ArraySetValue_(index,ndims,0);
61  while (!done) {
62  int p1; _ArrayIndex1DWO_(ndims,dim_global,index,is,0,p1);
63  int p2; _ArrayIndex1D_(ndims,bounds,index,0,p2);
64  _ArrayCopy1D_((xg+nvars*p1),(buffer+nvars*p2),nvars);
65  _ArrayIncrementIndex_(ndims,bounds,index,done);
66  }
67  if (proc) {
68 #ifndef serial
69  MPI_Send(buffer,size*nvars,MPI_DOUBLE,proc,1538,mpi->world);
70 #endif
71  } else {
72  done = 0; _ArraySetValue_(index,ndims,0);
73  while (!done) {
74  int p1; _ArrayIndex1D_(ndims,dim_local,index,ghosts,p1);
75  int p2; _ArrayIndex1D_(ndims,dim_local,index,0,p2);
76  _ArrayCopy1D_((buffer+nvars*p2),(x+nvars*p1),nvars);
77  _ArrayIncrementIndex_(ndims,dim_local,index,done);
78  }
79  }
80  free(buffer);
81  }
82 
83  } else {
84 #ifndef serial
85  /* Meanwhile, on other processes */
86  MPI_Status status;
87  int d, done, size;
88  size = 1; for (d=0; d<ndims; d++) size *= dim_local[d];
89  double *buffer = (double*) calloc (size*nvars, sizeof(double));
90  MPI_Recv(buffer,size*nvars,MPI_DOUBLE,0,1538,mpi->world,&status);
91  done = 0; _ArraySetValue_(index,ndims,0);
92  while (!done) {
93  int p1; _ArrayIndex1D_(ndims,dim_local,index,ghosts,p1);
94  int p2; _ArrayIndex1D_(ndims,dim_local,index,0,p2);
95  _ArrayCopy1D_((buffer+nvars*p2),(x+nvars*p1),nvars);
96  _ArrayIncrementIndex_(ndims,dim_local,index,done);
97  }
98  free(buffer);
99 #endif
100  }
101  return(0);
102 }
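A usage sketch (names are hypothetical): rank 0 fills a global initial solution and distributes it to the local arrays of all ranks:

#include <stdlib.h>
#include <mpivars.h>

void distribute_initial_solution(MPIVariables *mpi, double *u_local,
                                 int *dim_global, int *dim_local,
                                 int ndims, int ghosts, int nvars)
{
  double *u_global = NULL;
  if (!mpi->rank) {
    int d, size = nvars;
    for (d = 0; d < ndims; d++) size *= dim_global[d];
    u_global = (double*) calloc(size, sizeof(double));
    /* ... rank 0 fills u_global, e.g., by reading an initial solution file ... */
  }
  MPIPartitionArraynD(ndims, mpi, u_global, u_local,
                      dim_global, dim_local, ghosts, nvars);
  if (u_global) free(u_global);
}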
int MPIPartitionArraynDwGhosts(int ndims, void *m, double *xg, double *x, int *dim_global, int *dim_local, int ghosts, int nvars)

Partition a global array into local arrays for an n-dimensional array

Partitions the contents of a global n-dimensional array on rank 0 (root) to local n-dimensional arrays on all the MPI ranks. See the documentation of MPIExchangeBoundariesnD() for how the n-dimensional array is stored in memory as a single-index array. This is the same as MPIPartitionArraynD(), except that the global array has ghost points.

Notes:

  • The global array must have ghost points (the same number as the local arrays).
  • The global array must be allocated only on rank 0. On other ranks, it must be NULL.
Parameters
ndims: Number of spatial dimensions
m: MPI object of type MPIVariables
xg: Global array (preallocated) with ghost points
x: Local array
dim_global: Integer array with elements as global size along each spatial dimension
dim_local: Integer array with elements as local size along each spatial dimension
ghosts: Number of ghost points
nvars: Number of variables (vector components)

Definition at line 114 of file MPIPartitionArraynD.c.

124 {
125  MPIVariables *mpi = (MPIVariables*) m;
126  int d;
128 
129  int is[ndims], ie[ndims], index[ndims], bounds[ndims];
130  int dim_global_wghosts[ndims];
131  for (d = 0; d < ndims; d++) dim_global_wghosts[d] = dim_global[d] + 2*ghosts;
132 
133  /* xg should be non-null only on root */
134  if (mpi->rank && xg) {
135  fprintf(stderr,"Error in MPIPartitionArraynD(): global array exists on non-root processors (rank %d).\n",
136  mpi->rank);
137  return(1);
138  }
139  if ((!mpi->rank) && (!xg)) {
140  fprintf(stderr,"Error in MPIPartitionArraynD(): global array is not allocated on root processor.\n");
141  return(1);
142  }
143 
144  if (!mpi->rank) {
145  int proc;
146  for (proc = 0; proc < mpi->nproc; proc++) {
147  int done,size;
148  /* Find out the domain limits for each process */
149  IERR MPILocalDomainLimits(ndims,proc,mpi,dim_global,is,ie); CHECKERR(ierr);
150  size = 1;
151  for (d=0; d<ndims; d++) {
152  size *= (ie[d]-is[d]+2*ghosts);
153  bounds[d] = ie[d] - is[d] + 2*ghosts;
154  }
155  double *buffer = (double*) calloc (size*nvars, sizeof(double));
156  done = 0; _ArraySetValue_(index,ndims,0);
157  while (!done) {
158  int p1; _ArrayIndex1DWO_(ndims,dim_global_wghosts,index,is,0,p1);
159  int p2; _ArrayIndex1D_(ndims,bounds,index,0,p2);
160  _ArrayCopy1D_((xg+nvars*p1),(buffer+nvars*p2),nvars);
161  _ArrayIncrementIndex_(ndims,bounds,index,done);
162  }
163  if (proc) {
164 #ifndef serial
165  MPI_Send(buffer,size*nvars,MPI_DOUBLE,proc,1538,mpi->world);
166 #endif
167  } else {
168  done = 0; _ArraySetValue_(index,ndims,0);
169  _ArrayCopy1D_(buffer, x, size*nvars);
170  }
171  free(buffer);
172  }
173 
174  } else {
175 #ifndef serial
176  /* Meanwhile, on other processes */
177  MPI_Status status;
178  int done, size;
179  size = 1; for (d=0; d<ndims; d++) size *= (dim_local[d]+2*ghosts);
180  double *buffer = (double*) calloc (size*nvars, sizeof(double));
181  MPI_Recv(buffer,size*nvars,MPI_DOUBLE,0,1538,mpi->world,&status);
182  _ArrayCopy1D_(buffer, x, size*nvars);
183  free(buffer);
184 #endif
185  }
186  return(0);
187 }
int MPIPartitionArray1D(void *m, double *xg, double *x, int istart, int iend, int N_local, int ghosts)

Partition a global array into local arrays for an essentially 1D array

Partitions the contents of a global 1D array on the root rank (rank 0) to local arrays on all the MPI ranks. See documentation of MPIExchangeBoundaries1D() on what a "1D array" is in the context of a multidimensional simulation. The 1D array must be the same along spatial dimensions normal to the one it represents.

Notes:

  • The global array must not have ghost points.
  • The global array must be preallocated on only rank 0. On other ranks, it must be NULL.
  • Since this function deals with a 1D array, more than one rank may receive the same piece of data from rank 0 (i.e., if there is more than one MPI rank along the dimensions normal to the one corresponding to x).
Parameters
mMPI object of type MPIVariables
xgGlobal 1D array (must be preallocated) without ghost points
xLocal 1D array to be gathered
istartStarting index (global) of this rank's portion of the array
iendEnding index (global) of this rank's portion of the array + 1
N_localLocal size of the array
ghostsNumber of ghost points

Definition at line 25 of file MPIPartitionArray1D.c.

34 {
35  MPIVariables *mpi = (MPIVariables*) m;
36  int ierr = 0;
37 #ifndef serial
38  MPI_Status status;
39 #endif
40 
41  /* xg should be non-null only on root */
42  if (mpi->rank && xg) {
43  fprintf(stderr,"Error in MPIPartitionArray1D(): global array exists on non-root processors (rank %d).\n",
44  mpi->rank);
45  ierr = 1;
46  }
47  if ((!mpi->rank) && (!xg)) {
48  fprintf(stderr,"Error in MPIPartitionArray1D(): global array is not allocated on root processor.\n");
49  ierr = 1;
50  }
51 
52  if (!mpi->rank) {
53  int proc;
54  for (proc = 0; proc < mpi->nproc; proc++) {
55  /* Find out the domain limits for each process */
56  int is,ie;
57  if (proc) {
58 #ifndef serial
59  MPI_Recv(&is,1,MPI_INT,proc,1442,mpi->world,&status);
60  MPI_Recv(&ie,1,MPI_INT,proc,1443,mpi->world,&status);
61 #endif
62  } else {
63  is = istart;
64  ie = iend;
65  }
66  int size = ie - is;
67  double *buffer = (double*) calloc (size, sizeof(double));
68  _ArrayCopy1D_((xg+is),buffer,size);
69  if (proc) {
70 #ifndef serial
71  MPI_Send(buffer,size,MPI_DOUBLE,proc,1539,mpi->world);
72 #endif
73  } else _ArrayCopy1D_(buffer,x,N_local);
74  free(buffer);
75  }
76 
77  } else {
78 #ifndef serial
79  /* Meanwhile, on other processes */
80  /* send local start and end indices to root */
81  MPI_Send(&istart,1,MPI_INT,0,1442,mpi->world);
82  MPI_Send(&iend ,1,MPI_INT,0,1443,mpi->world);
 83  double *buffer = (double*) calloc (N_local, sizeof(double));
84  MPI_Recv(buffer,N_local,MPI_DOUBLE,0,1539,mpi->world,&status);
85  _ArrayCopy1D_(buffer,x,N_local);
86  free(buffer);
87 #endif
88  }
89  return(ierr);
90 }
int MPIGetArrayDatanD(double *xbuf, double *x, int *source, int *dest, int *limits, int *dim, int ghosts, int ndims, int nvars, void *m)

Fetch data from an n-dimensional local array on another rank

This function lets one rank get a portion of a local n-dimensional array on another rank. The n-dimensional array must be stored in memory as a single-index array as described in the documentation of MPIExchangeBoundariesnD(). The source rank sends to the dest rank a logically rectangular n-dimensional portion of its local copy of the array x. The extent of this logically rectangular portion is defined by limits.

  • limits is an array of size 2x the number of spatial dimensions, with elements as: [ is[0], ie[0], is[1], ie[1], ..., is[ndims-1], ie[ndims-1] ], where is[n] is the starting index along spatial dimension n, and ie[n] is the end index (+1) along spatial dimension n.
Parameters
xbuf: preallocated memory on the destination rank to hold the received data
x: local array of which a part is needed
source: MPI rank of the source (n-dimensional, as computed by MPIRanknD())
dest: MPI rank of the destination (n-dimensional)
limits: Integer array (of size 2*ndims) with the start and end indices along each spatial dimension of the desired portion of the array
dim: Integer array whose elements are the local size of x in each spatial dimension
ghosts: Number of ghost points
ndims: Number of spatial dimensions
nvars: Number of variables (vector components)
m: MPI object of type MPIVariables

Definition at line 21 of file MPIGetArrayDatanD.c.

34 {
35  MPIVariables *mpi = (MPIVariables*) m;
36  int d;
37 
38  int source_rank = MPIRank1D(ndims,mpi->iproc,source);
39  int dest_rank = MPIRank1D(ndims,mpi->iproc,dest );
40 
41  int is[ndims], ie[ndims], index[ndims], bounds[ndims], size;
42  size = 1;
43  for (d=0; d<ndims; d++) {
44  is[d] = limits[2*d ];
45  ie[d] = limits[2*d+1];
46  size *= (ie[d] - is[d]);
47  }
48  _ArraySubtract1D_(bounds,ie,is,ndims);
49 
50  if (source_rank == dest_rank) {
51  /* source and dest are the same process */
52  int done = 0; _ArraySetValue_(index,ndims,0);
53  while (!done) {
54  int p1; _ArrayIndex1D_(ndims,bounds,index,0,p1);
55  int p2; _ArrayIndex1DWO_(ndims,dim,index,is,ghosts,p2);
56  _ArrayCopy1D_((x+nvars*p2),(xbuf+nvars*p1),nvars);
57  _ArrayIncrementIndex_(ndims,bounds,index,done);
58  }
59  } else {
60 #ifdef serial
61  fprintf(stderr,"Error in MPIGetArrayDatanD(): This is a serial run. Source and ");
62  fprintf(stderr,"destination ranks must be equal. Instead they are %d and %d.\n",
63  source_rank,dest_rank);
64  return(1);
65 #else
66  if (mpi->rank == source_rank) {
67  double *buf = (double*) calloc (size*nvars, sizeof(double));
68  int done = 0; _ArraySetValue_(index,ndims,0);
69  while (!done) {
70  int p1; _ArrayIndex1D_(ndims,bounds,index,0,p1);
71  int p2; _ArrayIndex1DWO_(ndims,dim,index,is,ghosts,p2);
72  _ArrayCopy1D_((x+nvars*p2),(buf+nvars*p1),nvars);
73  _ArrayIncrementIndex_(ndims,bounds,index,done);
74  }
75  MPI_Send(buf,size*nvars,MPI_DOUBLE,dest_rank,2211,mpi->world);
76  free(buf);
77  } else if (mpi->rank == dest_rank) {
78  MPI_Status status;
79  MPI_Recv(xbuf,size*nvars,MPI_DOUBLE,source_rank,2211,mpi->world,&status);
80  } else {
81  fprintf(stderr,"Error in MPIGetArrayData3D(): Process %d shouldn't have entered this function.\n",
82  mpi->rank);
83  return(1);
84  }
85 #endif
86  }
87  return(0);
88 }
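A sketch of how the limits array is assembled (2D case; the index values and wrapper function are hypothetical). Note that, per the code above, ranks other than the source and destination should not call this function:

#include <mpivars.h>

/* Destination rank fetches the rectangular block [is0,ie0) x [is1,ie1)
   of the source rank's local array x into the preallocated buffer xbuf. */
void fetch_block(double *xbuf, double *x, int *source, int *dest,
                 int is0, int ie0, int is1, int ie1,
                 int *dim_local, int ghosts, int nvars, MPIVariables *mpi)
{
  int limits[4];
  limits[0] = is0; limits[1] = ie0;   /* start / end(+1) along dimension 0 */
  limits[2] = is1; limits[3] = ie1;   /* start / end(+1) along dimension 1 */
  MPIGetArrayDatanD(xbuf, x, source, dest, limits, dim_local,
                    ghosts, 2, nvars, mpi);
}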
int MPILocalDomainLimits(int ndims, int p, void *m, int *dim_global, int *is, int *ie)

Calculate the local domain limits/extent in terms of the global domain

Computes the local domain limits: given an MPI rank p, this function computes the starting and ending indices of the local domain on that rank. These indices are global.

  • The starting and ending indices are stored in preallocated integer arrays, whose elements are these indices in each spatial dimension.
  • The ending index is 1 + the actual last index; thus one can write loops as (i=start; i<end; i++) rather than (i=start; i<=end; i++).
Parameters
ndims: Number of spatial dimensions
p: MPI rank
m: MPI object of type MPIVariables
dim_global: Integer array with elements as global size in each spatial dimension
is: Integer array whose elements will contain the starting index of the local domain on rank p
ie: Integer array whose elements will contain the ending index of the local domain on rank p

Definition at line 18 of file MPILocalDomainLimits.c.

26 {
27  MPIVariables *mpi = (MPIVariables*) m;
28  int i;
30 
31  int ip[ndims];
32  IERR MPIRanknD(ndims,p,mpi->iproc,ip); CHECKERR(ierr);
33 
34  for (i=0; i<ndims; i++) {
35  int imax_local, isize, root = 0;
36  imax_local = MPIPartition1D(dim_global[i],mpi->iproc[i],root );
37  isize = MPIPartition1D(dim_global[i],mpi->iproc[i],ip[i]);
38  if (is) is[i] = ip[i]*imax_local;
39  if (ie) ie[i] = ip[i]*imax_local + isize;
40  }
41  return(0);
42 }
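A sketch (2D case; the wrapper function is hypothetical) of using the computed limits to loop over this rank's portion of the global index space:

#include <mpivars.h>

void loop_over_local_domain(MPIVariables *mpi, int *dim_global)
{
  int is[2], ie[2], i, j;
  MPILocalDomainLimits(2, mpi->rank, mpi, dim_global, is, ie);
  for (j = is[1]; j < ie[1]; j++) {     /* note: < ie, not <= ie */
    for (i = is[0]; i < ie[0]; i++) {
      /* (i,j) is a global index owned by this rank */
    }
  }
}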
int MPIMax_integer(int *global, int *var, int size, void *comm)

Find the maximum in an integer array over all ranks

Compute the global maximum over all MPI ranks in a given communicator for int datatype.

  • If var is an array of size greater than 1, global will be an array of the same size with each element as the maximum value of that element in var on all the MPI ranks in the given communicator.
Parameters
global: array to contain the global maximums
var: the local array
size: size of the local array
comm: MPI communicator

Definition at line 15 of file MPIMax.c.

21 {
22 #ifdef serial
23  int i;
24  for (i = 0; i < size; i++) global[i] = var[i];
25 #else
26  MPI_Allreduce((var==global?MPI_IN_PLACE:var),global,size,MPI_INT,MPI_MAX,*((MPI_Comm*)comm));
27 #endif
28  return(0);
29 }
int MPIMax_long(long *, long *, int, void *)

Find the maximum in a long integer array over all ranks

int MPIMax_double(double *global, double *var, int size, void *comm)

Find the maximum in a double array over all ranks

Compute the global maximum over all MPI ranks in a given communicator for double datatype.

  • If var is an array of size greater than 1, global will be an array of the same size with each element as the maximum value of that element in var on all the MPI ranks in the given communicator.
Parameters
global: array to contain the global maximums
var: the local array
size: size of the local array
comm: MPI communicator

Definition at line 38 of file MPIMax.c.

44 {
45 #ifdef serial
46  int i;
47  for (i = 0; i < size; i++) global[i] = var[i];
48 #else
49  MPI_Allreduce((var==global?MPI_IN_PLACE:var),global,size,MPI_DOUBLE,MPI_MAX,*((MPI_Comm*)comm));
50 #endif
51  return(0);
52 }
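A usage sketch (the local quantity and function name are hypothetical), e.g., to obtain the maximum wave speed or time-step constraint over the whole domain:

#include <mpivars.h>

double global_maximum(MPIVariables *mpi, double local_max)
{
  double global_max = 0.0;
  /* every rank passes its local value; all ranks receive the maximum */
  MPIMax_double(&global_max, &local_max, 1, &mpi->world);
  return global_max;
}

The minimum and sum functions below follow the same calling pattern.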
int MPIMin_integer(int *global, int *var, int size, void *comm)

Find the minimum in an integer array over all ranks

Compute the global minimum over all MPI ranks in a given communicator for int datatype.

  • If var is an array of size greater than 1, global will be an array of the same size with each element as the minimum value of that element in var on all the MPI ranks in the given communicator.
Parameters
global: array to contain the global minimums
var: the local array
size: size of the local array
comm: MPI communicator

Definition at line 15 of file MPIMin.c.

21 {
22 #ifdef serial
23  int i;
24  for (i = 0; i < size; i++) global[i] = var[i];
25 #else
26  MPI_Allreduce((var==global?MPI_IN_PLACE:var),global,size,MPI_INT,MPI_MIN,*((MPI_Comm*)comm));
27 #endif
28  return(0);
29 }
int MPIMin_double(double *global, double *var, int size, void *comm)

Find the minimum in a double array over all ranks

Compute the global minimum over all MPI ranks in a given communicator for double datatype.

  • If var is an array of size greater than 1, global will be an array of the same size with each element as the minimum value of that element in var on all the MPI ranks in the given communicator.
Parameters
global: array to contain the global minimums
var: the local array
size: size of the local array
comm: MPI communicator

Definition at line 38 of file MPIMin.c.

44 {
45 #ifdef serial
46  int i;
47  for (i = 0; i < size; i++) global[i] = var[i];
48 #else
49  MPI_Allreduce((var==global?MPI_IN_PLACE:var),global,size,MPI_DOUBLE,MPI_MIN,*((MPI_Comm*)comm));
50 #endif
51  return(0);
52 }
int MPISum_double(double *global, double *var, int size, void *comm)

Calculate the sum of an array of doubles over all ranks

Compute the global sum over all MPI ranks in a given communicator for double datatype.

  • If var is an array of size greater than 1, global will be an array of the same size with each element as the sum of that element in var on all the MPI ranks in the given communicator.
Parameters
global: array to contain the global sums
var: the local array
size: size of the local array
comm: MPI communicator

Definition at line 39 of file MPISum.c.

45 {
46 #ifdef serial
47  int i;
48  for (i = 0; i < size; i++) global[i] = var[i];
49 #else
50  MPI_Allreduce((var==global?MPI_IN_PLACE:var),global,size,MPI_DOUBLE,MPI_SUM,*((MPI_Comm*)comm));
51 #endif
52  return(0);
53 }
int MPISum_integer(int *global, int *var, int size, void *comm)

Calculate the sum of an array of integers over all ranks

Compute the global sum over all MPI ranks in a given communicator for int datatype.

  • If var is an array of size greater than 1, global will be an array of the same size with each element as the sum of that element in var on all the MPI ranks in the given communicator.
Parameters
global: array to contain the global sums
var: the local array
size: size of the local array
comm: MPI communicator

Definition at line 16 of file MPISum.c.

22 {
23 #ifdef serial
24  int i;
25  for (i = 0; i < size; i++) global[i] = var[i];
26 #else
27  MPI_Allreduce((var==global?MPI_IN_PLACE:var),global,size,MPI_INT,MPI_SUM,*((MPI_Comm*)comm));
28 #endif
29  return(0);
30 }
int MPIPartition1D(int nglobal, int nproc, int rank)

Partition (along a dimension) the domain given global size and number of ranks

Given a 1D array of a given global size nglobal, and the total number of MPI ranks nproc on which it will be partitioned, this function computes the size of the local part of the 1D array on the rank specified by rank.

Parameters
nglobal: Global size
nproc: Total number of ranks
rank: Rank

Definition at line 14 of file MPIPartition1D.c.

19 {
20  int nlocal;
21  if (nglobal%nproc == 0) nlocal = nglobal/nproc;
22  else {
23  if (rank == nproc-1) nlocal = nglobal/nproc + nglobal%nproc;
24  else nlocal = nglobal/nproc;
25  }
26  return(nlocal);
27 }
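A worked example (values are hypothetical): a global size of 100 partitioned across 8 ranks gives 100/8 = 12 with a remainder of 4, and per the code above the remainder goes entirely to the last rank:

#include <mpivars.h>

void partition_example(void)
{
  int n0 = MPIPartition1D(100, 8, 0);  /* = 12          */
  int n7 = MPIPartition1D(100, 8, 7);  /* = 12 + 4 = 16 */
}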
int MPIRank1D(int ndims, int *iproc, int *ip)

Calculate 1D rank from the n-dimensional rank

This function returns the 1D rank, given the n-dimensional rank and the total number of MPI ranks along each spatial dimension.

1D Rank: This is the rank of the process in the communicator.
n-Dimensional Rank: This represents an integer array, where each element is the rank of the process along a spatial dimension.

Consider a 2D simulation running with 21 MPI ranks - 7 along the x direction, and 3 along the y direction, as shown in the following figure:

(Figure: nd_ranks.png)
The boldface number in the parentheses is the n-dimensional rank (n=2), while the number below it in normal typeface is the 1D rank, corresponding to the rank in the MPI communicator.

Parameters
ndims: Number of spatial dimensions
iproc: Integer array whose elements are the number of MPI ranks along each dimension
ip: Integer array whose elements are the rank of this process along each dimension

Definition at line 26 of file MPIRank1D.c.

31 {
32  int i,rank = 0, term = 1;
33  for (i=0; i<ndims; i++) {
34  rank += (ip[i]*term);
35  term *= iproc[i];
36  }
37 
38  return(rank);
39 }
int MPIRanknD(int ndims, int rank, int *iproc, int *ip)

Calculate the n-dimensional rank from the 1D rank

This function computes the n-dimensional rank, given the 1D rank and the total number of MPI ranks along each spatial dimension.

1D Rank: This is the rank of the process in the communicator.
n-Dimensional Rank: This represents an integer array, where each element is the rank of the process along a spatial dimension.

Consider a 2D simulation running with 21 MPI ranks - 7 along the x direction, and 3 along the y direction, as shown in the following figure:

(Figure: nd_ranks.png)
The boldface number in the parentheses is the n-dimensional rank (n=2), while the number below it in normal typeface is the 1D rank, corresponding to the rank in the MPI communicator.

Note:

  • The array to hold the computed n-dimensional rank must be preallocated, and its size must be equal to the number of spatial dimensions.
Parameters
ndims: Number of spatial dimensions
rank: 1D rank
iproc: Integer array whose elements are the number of MPI ranks along each dimension
ip: Integer array whose elements are the rank of this process along each dimension

Definition at line 27 of file MPIRanknD.c.

33 {
34  int i,term = 1;
35  for (i=0; i<ndims; i++) term *= iproc[i];
36  for (i=ndims-1; i>=0; i--) {
37  term /= iproc[i];
38  ip[i] = rank/term;
39  rank -= ip[i]*term;
40  }
41  return(0);
42 }
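A worked example for the 7x3 layout in the figures above (iproc = {7,3}): rank 9 sits at n-dimensional rank (2,1), and the two functions are inverses of each other (the wrapper function is hypothetical):

#include <mpivars.h>

void rank_conversion_example(void)
{
  int iproc[2] = {7, 3};   /* 7 ranks along dimension 0, 3 along dimension 1 */
  int ip[2], rank;
  MPIRanknD(2, 9, iproc, ip);         /* ip = {2, 1}        */
  rank = MPIRank1D(2, iproc, ip);     /* rank = 2 + 1*7 = 9 */
}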
void MPIGetFilename(char *root, void *c, char *filename)

Generate a unique filename given the rank of the process to let that process write to its own file

Get a string representing a filename indexed by the MPI rank: filename = root.index, where index is the string corresponding to the MPI rank.

Parameters
root: filename root
c: MPI communicator
filename: the generated filename

Definition at line 19 of file MPIGetFilename.c.

24 {
25  char tail[_MAX_STRING_SIZE_]="";
26  int rank;
27 
28 #ifndef serial
29  MPI_Comm comm = *((MPI_Comm*)c);
30  MPI_Comm_rank(comm,&rank);
31 #else
32  rank = 0;
33 #endif
34 
35  GetStringFromInteger(rank,tail,4);
36  strcpy(filename,"");
37  strcat(filename,root);
38  strcat(filename,"." );
39  strcat(filename,tail);
40 
41  return;
42 }
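A usage sketch (the root string "op" and wrapper function are hypothetical; the rank index is written with the 4-character width passed to GetStringFromInteger() above, assumed here to be zero-padded):

#include <basic.h>     /* _MAX_STRING_SIZE_ */
#include <mpivars.h>

void open_own_file(MPIVariables *mpi)
{
  char filename[_MAX_STRING_SIZE_];
  MPIGetFilename("op", &mpi->world, filename);
  /* e.g., rank 12 would get "op.0012"; each rank can now open its own file */
}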
int gpuMPIExchangeBoundariesnD(int, int, const int *, int, void *, double *)