HyPar 1.0
Finite-Difference Hyperbolic-Parabolic PDE Solver on Cartesian Grids
MPIExchangeBoundariesnD.c
#include <stdlib.h>
#include <basic.h>
#include <arrayfunctions.h>
#include <mpivars.h>

/* Exchange data across MPI ranks and fill in the ghost points of an
   n-dimensional array. */
int MPIExchangeBoundariesnD(
                             int    ndims,   /* number of spatial dimensions */
                             int    nvars,   /* number of variables (vector components) per grid point */
                             int    *dim,    /* local grid size in each spatial dimension */
                             int    ghosts,  /* number of ghost points */
                             void   *m,      /* MPI object of type MPIVariables */
                             double *var     /* array (with ghost points) whose ghost values are to be filled */
                           )
{
#ifndef serial
  MPIVariables *mpi = (MPIVariables*) m;
  int d;

  int *ip     = mpi->ip;
  int *iproc  = mpi->iproc;
  int *bcflag = mpi->bcperiodic;

  int neighbor_rank[2*ndims], nip[ndims], index[ndims], bounds[ndims], offset[ndims];
  MPI_Request rcvreq[2*ndims], sndreq[2*ndims];
  for (d=0; d<2*ndims; d++) rcvreq[d] = sndreq[d] = MPI_REQUEST_NULL;

  /* each process has 2*ndims neighbors (except at non-periodic physical boundaries) */
  /* calculate the rank of these neighbors (-1 -> none) */
  for (d = 0; d < ndims; d++) {
    _ArrayCopy1D_(ip,nip,ndims);
    if (ip[d] == 0)  nip[d] = iproc[d]-1;
    else             nip[d]--;
    if ((ip[d] == 0) && (!bcflag[d]))  neighbor_rank[2*d] = -1;
    else                               neighbor_rank[2*d] = MPIRank1D(ndims,iproc,nip);
    _ArrayCopy1D_(ip,nip,ndims);
    if (ip[d] == (iproc[d]-1))  nip[d] = 0;
    else                        nip[d]++;
    if ((ip[d] == (iproc[d]-1)) && (!bcflag[d]))  neighbor_rank[2*d+1] = -1;
    else                                          neighbor_rank[2*d+1] = MPIRank1D(ndims,iproc,nip);
  }
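  /* Example (illustration only, values are hypothetical): in 2D with
     iproc = {4,3}, periodic BCs in both dimensions, and this process at
     ip = {0,2}, the loop above gives the dimension-0 low-side neighbor
     nip = {3,2} (wrap-around) and high-side neighbor nip = {1,2};
     MPIRank1D() then converts each nip back to a 1D MPI rank. */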

  /* calculate dimensions of each of the send-receive regions */
  double *sendbuf = mpi->sendbuf;
  double *recvbuf = mpi->recvbuf;
  int stride = mpi->maxbuf;
  int bufdim[ndims];
  for (d = 0; d < ndims; d++) {
    bufdim[d] = 1;
    int i;
    for (i = 0; i < ndims; i++) {
      if (i == d) bufdim[d] *= ghosts;
      else        bufdim[d] *= dim[i];
    }
  }
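  /* For example (hypothetical sizes), in 2D with dim = {100,50} and
     ghosts = 3: bufdim[0] = 3*50 = 150 and bufdim[1] = 100*3 = 300,
     i.e., each face buffer holds bufdim[d]*nvars doubles, one slab of
     thickness "ghosts" normal to dimension d. */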

  /* post the receive requests */
  for (d = 0; d < ndims; d++) {
    if (neighbor_rank[2*d  ] != -1) {
      MPI_Irecv(&recvbuf[2*d*stride],bufdim[d]*nvars,MPI_DOUBLE,neighbor_rank[2*d  ],1630,
                mpi->world,&rcvreq[2*d]);
    }
    if (neighbor_rank[2*d+1] != -1) {
      MPI_Irecv(&recvbuf[(2*d+1)*stride],bufdim[d]*nvars,MPI_DOUBLE,neighbor_rank[2*d+1],1631,
                mpi->world,&rcvreq[2*d+1]);
    }
  }

  /* copy the data to be sent into the send buffers */
  for (d = 0; d < ndims; d++) {
    _ArrayCopy1D_(dim,bounds,ndims); bounds[d] = ghosts;
    if (neighbor_rank[2*d] != -1) {
      _ArraySetValue_(offset,ndims,0);
      int done = 0; _ArraySetValue_(index,ndims,0);
      while (!done) {
        int p1; _ArrayIndex1DWO_(ndims,dim,index,offset,ghosts,p1);
        int p2; _ArrayIndex1D_(ndims,bounds,index,0,p2);
        _ArrayCopy1D_((var+nvars*p1),(sendbuf+2*d*stride+nvars*p2),nvars);
        _ArrayIncrementIndex_(ndims,bounds,index,done);
      }
    }
    if (neighbor_rank[2*d+1] != -1) {
      _ArraySetValue_(offset,ndims,0); offset[d] = dim[d]-ghosts;
      int done = 0; _ArraySetValue_(index,ndims,0);
      while (!done) {
        int p1; _ArrayIndex1DWO_(ndims,dim,index,offset,ghosts,p1);
        int p2; _ArrayIndex1D_(ndims,bounds,index,0,p2);
        _ArrayCopy1D_((var+nvars*p1),(sendbuf+(2*d+1)*stride+nvars*p2),nvars);
        _ArrayIncrementIndex_(ndims,bounds,index,done);
      }
    }
  }
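  /* Geometry of the packed data (for reference): offset 0 selects the
     first "ghosts" interior layers along dimension d (destined for the
     low-side neighbor), while offset dim[d]-ghosts selects the last
     "ghosts" interior layers (destined for the high-side neighbor). */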

  /* send the data */
  for (d = 0; d < ndims; d++) {
    if (neighbor_rank[2*d  ] != -1) {
      MPI_Isend(&sendbuf[2*d*stride],bufdim[d]*nvars,MPI_DOUBLE,neighbor_rank[2*d  ],1631,
                mpi->world,&sndreq[2*d]);
    }
    if (neighbor_rank[2*d+1] != -1) {
      MPI_Isend(&sendbuf[(2*d+1)*stride],bufdim[d]*nvars,MPI_DOUBLE,neighbor_rank[2*d+1],1630,
                mpi->world,&sndreq[2*d+1]);
    }
  }
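  /* Note on tag matching: the send to the low-side neighbor is tagged
     1631 and pairs with that neighbor's high-side receive (also tagged
     1631); the send to the high-side neighbor is tagged 1630 and pairs
     with that neighbor's low-side receive (tag 1630). */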

  /* wait until all the data has been received */
  MPI_Status status_arr[2*ndims];
  MPI_Waitall(2*ndims,rcvreq,status_arr);
  /* copy received data to ghost points */
  for (d = 0; d < ndims; d++) {
    _ArrayCopy1D_(dim,bounds,ndims); bounds[d] = ghosts;
    if (neighbor_rank[2*d] != -1) {
      _ArraySetValue_(offset,ndims,0); offset[d] = -ghosts;
      int done = 0; _ArraySetValue_(index,ndims,0);
      while (!done) {
        int p1; _ArrayIndex1DWO_(ndims,dim,index,offset,ghosts,p1);
        int p2; _ArrayIndex1D_(ndims,bounds,index,0,p2);
        _ArrayCopy1D_((recvbuf+2*d*stride+nvars*p2),(var+nvars*p1),nvars);
        _ArrayIncrementIndex_(ndims,bounds,index,done);
      }
    }
    if (neighbor_rank[2*d+1] != -1) {
      _ArraySetValue_(offset,ndims,0); offset[d] = dim[d];
      int done = 0; _ArraySetValue_(index,ndims,0);
      while (!done) {
        int p1; _ArrayIndex1DWO_(ndims,dim,index,offset,ghosts,p1);
        int p2; _ArrayIndex1D_(ndims,bounds,index,0,p2);
        _ArrayCopy1D_((recvbuf+(2*d+1)*stride+nvars*p2),(var+nvars*p1),nvars);
        _ArrayIncrementIndex_(ndims,bounds,index,done);
      }
    }
  }
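  /* Ghost layers filled above: offset -ghosts covers the low-side ghost
     indices -ghosts..-1 along d, and offset dim[d] covers the high-side
     ghost indices dim[d]..dim[d]+ghosts-1 (e.g., indices -3..-1 and
     100..102 for the hypothetical 1D case dim = {100}, ghosts = 3). */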
  /* wait until the send requests are complete before the send buffers can be reused or freed */
  MPI_Waitall(2*ndims,sndreq,status_arr);

#endif
  return(0);
}
174 
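A minimal usage sketch (not part of this file; the wrapper and variable names below are hypothetical). It assumes the MPIVariables object, the local grid size, and the solution array with ghost points have already been set up by the solver's initialization, and that the function prototype is available through mpivars.h:

  #include <mpivars.h>

  /* Hypothetical helper: refresh the ghost points of the solution array
     "u" (allocated with "ghosts" ghost points per side) after the
     interior values have been updated. */
  static int exchange_ghosts(MPIVariables *mpi, double *u, int ndims,
                             int nvars, int *dim_local, int ghosts)
  {
    /* MPIExchangeBoundariesnD takes the MPI object as a void pointer */
    return MPIExchangeBoundariesnD(ndims, nvars, dim_local, ghosts, (void*) mpi, u);
  }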