HyPar 1.0
Finite-Difference Hyperbolic-Parabolic PDE Solver on Cartesian Grids
MPIExchangeBoundariesnD.c File Reference

Exchange data and fill in ghost points for an n-dimensional array.

#include <stdlib.h>
#include <basic.h>
#include <arrayfunctions.h>
#include <mpivars.h>


Functions

int MPIExchangeBoundariesnD (int ndims, int nvars, int *dim, int ghosts, void *m, double *var)
 

Detailed Description

Exchange data and fill in ghost points for an n-dimensional array.

Author
Debojyoti Ghosh

Definition in file MPIExchangeBoundariesnD.c.

Function Documentation

int MPIExchangeBoundariesnD (int ndims, int nvars, int *dim, int ghosts, void *m, double *var)

Exchange data across MPI ranks, and fill in ghost points for an n-dimensional array (where n is the total number of spatial dimensions). If any of the physical boundaries are periodic, this function also exchanges data and fills in the ghost points for these boundaries.

The n-dimensional array must be stored in the memory as a single-index array, with the following order of mapping:

  • Number of variables (vector components)
  • Spatial dimension 0
  • Spatial dimension 1
  • ...
  • Spatial dimensions ndims-1

For example, consider a 2D simulation (ndims = 2), of size \(7 \times 3\), with \(4\) vector components (nvars = 4). The following figure shows the layout (without the ghost points):

[Figure layout.png: memory layout of the \(7 \times 3\) grid with 4 vector components, ghost points not shown]

The bold numbers in parentheses represent the 2D indices. The numbers below them are the indices of the array that correspond to that 2D location. Thus, elements 40, 41, 42, and 43 in the array are the 1st, 2nd, 3rd, and 4th vector components at location (1,3).

If \({\bf i}\left[{\rm ndims}\right]\) is an integer array representing an n-dimensional index (for example, \(\left(5,4\right)\) in 2D, \(\left(3,5,2\right)\) in 3D), and the number of vector components is nvars, then:

  • _ArrayIndex1D_ computes the index \(p\) in the array corresponding to \({\bf i}\). In the above example, \({\bf i} = \left(1,3\right) \rightarrow p = 10\).
  • var[nvars*p+v] accesses the v-th component of the n-dimensional array var at location \({\bf i}\). In the above example, to access the 3rd vector component at location \(\left(1,3\right)\), we have \(p=10\), so var[4*10+2] = var[42] (see the sketch after this list).
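
The following standalone sketch illustrates this index arithmetic for the example above. It is an illustration, not the actual _ArrayIndex1D_ macro from arrayfunctions.h; it assumes the \(7 \times 3\) grid is stored with dim[0] = 3 and dim[1] = 7 (consistent with \(p = 10\) for \({\bf i} = \left(1,3\right)\)) and omits ghost points:

#include <stdio.h>

int main(void)
{
  int ndims  = 2;
  int nvars  = 4;
  int dim[2] = {3, 7}; /* local grid size; dimension 0 varies fastest */
  int i[2]   = {1, 3}; /* the n-dimensional index (1,3) from the example */

  /* index mapping without ghost points: p = i[0] + dim[0]*(i[1] + dim[1]*(...)) */
  int p = 0, d;
  for (d = ndims-1; d >= 0; d--) p = p*dim[d] + i[d];

  int v = 2; /* the 3rd vector component (0-based) */
  printf("p = %d, flat index of component %d = %d\n", p, v, nvars*p+v);
  /* prints: p = 10, flat index of component 2 = 42 */
  return 0;
}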
Parameters
    ndims    Number of spatial dimensions
    nvars    Number of variables (vector components) at each grid location
    dim      Integer array whose elements are the local size along each spatial dimension
    ghosts   Number of ghost points
    m        MPI object of type MPIVariables
    var      The array for which to exchange data and fill in ghost points
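
For context, a call site might look like the following sketch. The wrapper function is hypothetical, and it assumes an MPIVariables object whose domain-decomposition members (ip, iproc, bcperiodic, sendbuf, recvbuf, maxbuf) have already been set up elsewhere:

#include <mpivars.h> /* MPIVariables, MPIExchangeBoundariesnD() */

/* hypothetical helper: fill the ghost points of a local solution array u */
static int FillGhostPoints(int ndims, int nvars, int *dim_local, int ghosts,
                           MPIVariables *mpi, double *u)
{
  /* exchanges boundary data with up to 2*ndims neighboring ranks and fills
     the ghost points of u, including across periodic physical boundaries */
  return MPIExchangeBoundariesnD(ndims, nvars, dim_local, ghosts, mpi, u);
}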

Definition at line 42 of file MPIExchangeBoundariesnD.c.

{
#ifndef serial
  MPIVariables *mpi = (MPIVariables*) m;
  int d;

  int *ip     = mpi->ip;
  int *iproc  = mpi->iproc;
  int *bcflag = mpi->bcperiodic;

  int neighbor_rank[2*ndims], nip[ndims], index[ndims], bounds[ndims], offset[ndims];
  MPI_Request rcvreq[2*ndims], sndreq[2*ndims];
  for (d=0; d<2*ndims; d++) rcvreq[d] = sndreq[d] = MPI_REQUEST_NULL;

  /* each process has 2*ndims neighbors (except at non-periodic physical boundaries); */
  /* calculate the ranks of these neighbors (-1 -> none) */
  for (d = 0; d < ndims; d++) {
    /* neighbor on the "low" side along dimension d */
    _ArrayCopy1D_(ip,nip,ndims);
    if (ip[d] == 0)  nip[d] = iproc[d]-1;   /* wrap around for periodicity */
    else             nip[d]--;
    if ((ip[d] == 0) && (!bcflag[d])) neighbor_rank[2*d] = -1;
    else                              neighbor_rank[2*d] = MPIRank1D(ndims,iproc,nip);
    /* neighbor on the "high" side along dimension d */
    _ArrayCopy1D_(ip,nip,ndims);
    if (ip[d] == (iproc[d]-1))  nip[d] = 0; /* wrap around for periodicity */
    else                        nip[d]++;
    if ((ip[d] == (iproc[d]-1)) && (!bcflag[d])) neighbor_rank[2*d+1] = -1;
    else                                         neighbor_rank[2*d+1] = MPIRank1D(ndims,iproc,nip);
  }

  /* calculate the size of each send/receive region: ghosts points deep */
  /* along dimension d, full local extent along every other dimension   */
  double *sendbuf = mpi->sendbuf;
  double *recvbuf = mpi->recvbuf;
  int stride = mpi->maxbuf;
  int bufdim[ndims];
  for (d = 0; d < ndims; d++) {
    bufdim[d] = 1;
    int i;
    for (i = 0; i < ndims; i++) {
      if (i == d) bufdim[d] *= ghosts;
      else        bufdim[d] *= dim[i];
    }
  }

  /* post the receive requests (tag 1630 for data arriving from the low side, */
  /* tag 1631 for data arriving from the high side)                           */
  for (d = 0; d < ndims; d++) {
    if (neighbor_rank[2*d] != -1) {
      MPI_Irecv(&recvbuf[2*d*stride],bufdim[d]*nvars,MPI_DOUBLE,neighbor_rank[2*d],1630,
                mpi->world,&rcvreq[2*d]);
    }
    if (neighbor_rank[2*d+1] != -1) {
      MPI_Irecv(&recvbuf[(2*d+1)*stride],bufdim[d]*nvars,MPI_DOUBLE,neighbor_rank[2*d+1],1631,
                mpi->world,&rcvreq[2*d+1]);
    }
  }

  /* copy the interior data adjacent to each face into the send buffers */
  for (d = 0; d < ndims; d++) {
    _ArrayCopy1D_(dim,bounds,ndims); bounds[d] = ghosts;
    if (neighbor_rank[2*d] != -1) {
      /* low face: the first `ghosts` interior layers along dimension d */
      _ArraySetValue_(offset,ndims,0);
      int done = 0; _ArraySetValue_(index,ndims,0);
      while (!done) {
        int p1; _ArrayIndex1DWO_(ndims,dim,index,offset,ghosts,p1);
        int p2; _ArrayIndex1D_(ndims,bounds,index,0,p2);
        _ArrayCopy1D_((var+nvars*p1),(sendbuf+2*d*stride+nvars*p2),nvars);
        _ArrayIncrementIndex_(ndims,bounds,index,done);
      }
    }
    if (neighbor_rank[2*d+1] != -1) {
      /* high face: the last `ghosts` interior layers along dimension d */
      _ArraySetValue_(offset,ndims,0); offset[d] = dim[d]-ghosts;
      int done = 0; _ArraySetValue_(index,ndims,0);
      while (!done) {
        int p1; _ArrayIndex1DWO_(ndims,dim,index,offset,ghosts,p1);
        int p2; _ArrayIndex1D_(ndims,bounds,index,0,p2);
        _ArrayCopy1D_((var+nvars*p1),(sendbuf+(2*d+1)*stride+nvars*p2),nvars);
        _ArrayIncrementIndex_(ndims,bounds,index,done);
      }
    }
  }

  /* send the data (a message sent to the high-side neighbor arrives there */
  /* as low-side data, hence the swapped tags)                             */
  for (d = 0; d < ndims; d++) {
    if (neighbor_rank[2*d] != -1) {
      MPI_Isend(&sendbuf[2*d*stride],bufdim[d]*nvars,MPI_DOUBLE,neighbor_rank[2*d],1631,
                mpi->world,&sndreq[2*d]);
    }
    if (neighbor_rank[2*d+1] != -1) {
      MPI_Isend(&sendbuf[(2*d+1)*stride],bufdim[d]*nvars,MPI_DOUBLE,neighbor_rank[2*d+1],1630,
                mpi->world,&sndreq[2*d+1]);
    }
  }

  /* wait until all the receives have completed */
  MPI_Status status_arr[2*ndims];
  MPI_Waitall(2*ndims,rcvreq,status_arr);

  /* copy the received data into the ghost points */
  for (d = 0; d < ndims; d++) {
    _ArrayCopy1D_(dim,bounds,ndims); bounds[d] = ghosts;
    if (neighbor_rank[2*d] != -1) {
      /* low face: ghost layers at indices -ghosts, ..., -1 along dimension d */
      _ArraySetValue_(offset,ndims,0); offset[d] = -ghosts;
      int done = 0; _ArraySetValue_(index,ndims,0);
      while (!done) {
        int p1; _ArrayIndex1DWO_(ndims,dim,index,offset,ghosts,p1);
        int p2; _ArrayIndex1D_(ndims,bounds,index,0,p2);
        _ArrayCopy1D_((recvbuf+2*d*stride+nvars*p2),(var+nvars*p1),nvars);
        _ArrayIncrementIndex_(ndims,bounds,index,done);
      }
    }
    if (neighbor_rank[2*d+1] != -1) {
      /* high face: ghost layers at indices dim[d], ..., dim[d]+ghosts-1 */
      _ArraySetValue_(offset,ndims,0); offset[d] = dim[d];
      int done = 0; _ArraySetValue_(index,ndims,0);
      while (!done) {
        int p1; _ArrayIndex1DWO_(ndims,dim,index,offset,ghosts,p1);
        int p2; _ArrayIndex1D_(ndims,bounds,index,0,p2);
        _ArrayCopy1D_((recvbuf+(2*d+1)*stride+nvars*p2),(var+nvars*p1),nvars);
        _ArrayIncrementIndex_(ndims,bounds,index,done);
      }
    }
  }

  /* wait until the sends have completed before returning */
  MPI_Waitall(2*ndims,sndreq,status_arr);

#endif
  return(0);
}
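
To make the pack loops above concrete, here is a minimal standalone 2D sketch of how the low face along dimension 0 is enumerated. The plain loops below spell out the index arithmetic that the _ArrayIndex1DWO_ and _ArrayIndex1D_ macros are assumed to implement (var padded by ghosts points on every side, send buffer unpadded); it is an illustration, not the macros' actual definitions:

#include <stdio.h>

int main(void)
{
  int dim[2]    = {3, 7};           /* local interior size                              */
  int ghosts    = 1;                /* width of the ghost layer                         */
  int bounds[2] = {ghosts, dim[1]}; /* send-region size for dimension d = 0             */
  int offset[2] = {0, 0};           /* zero offset: region starts at the interior edge  */
  int index[2];

  for (index[1] = 0; index[1] < bounds[1]; index[1]++) {
    for (index[0] = 0; index[0] < bounds[0]; index[0]++) {
      /* p1: flat index into var, which is padded by `ghosts` on every side */
      int p1 = (index[1]+offset[1]+ghosts)*(dim[0]+2*ghosts)
             + (index[0]+offset[0]+ghosts);
      /* p2: flat index into the unpadded send buffer of size bounds[0]*bounds[1] */
      int p2 = index[1]*bounds[0] + index[0];
      printf("var element %2d -> send buffer element %d\n", p1, p2);
    }
  }
  return 0;
}

The unpack loops run the same traversal with the offset moved into the ghost region (offset[d] = -ghosts for the low face, offset[d] = dim[d] for the high face) and the copy direction reversed.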