HyPar  1.0
Finite-Difference Hyperbolic-Parabolic PDE Solver on Cartesian Grids
ReadArray.c File Reference

Read in a vector field from file.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <basic.h>
#include <arrayfunctions.h>
#include <mpivars.h>
#include <hypar.h>


Functions

static int ReadArraySerial (int, int, int *, int *, int, void *, void *, double *, double *, char *, int *)
 
static int ReadArrayParallel (int, int, int *, int *, int, void *, void *, double *, double *, char *, int *)
 
static int ReadArrayMPI_IO (int, int, int *, int *, int, void *, void *, double *, double *, char *, int *)
 
int ReadArray (int ndims, int nvars, int *dim_global, int *dim_local, int ghosts, void *s, void *m, double *x, double *u, char *fname_root, int *read_flag)
 

Detailed Description

Read in a vector field from file.

Author
Debojyoti Ghosh

Definition in file ReadArray.c.

Function Documentation

int ReadArraySerial ( int  ndims,
int  nvars,
int *  dim_global,
int *  dim_local,
int  ghosts,
void *  s,
void *  m,
double *  x,
double *  u,
char *  fname_root,
int *  read_flag 
)
static

Read an array in a serial fashion: for a multi-processor simulation, only rank 0 reads in the entire solution from the file and then distributes the relevant portions to each of the processors. This involves allocating the global domain on rank 0, so do not use this mode for large domains. It is also not very scalable when running on a very large number of processors (> 1000). Supports both binary and ASCII formats.

The name of the file being read is <fname_root>.inp

ASCII format:
The input file should contain the ASCII data as follows:

x0_i (0 <= i < dim_global[0])
x1_i (0 <= i < dim_global[1])
...
x{ndims-1}_i (0 <= i < dim_global[ndims-1])
u0_p (0 <= p < N)
u1_p (0 <= p < N)
...
u{nvars-1}_p (0 <= p < N)

where
x0, x1, ..., x{ndims-1} represent the spatial dimensions (for a 3D problem, x0 = x, x1 = y, x2 = z),
u0, u1, ..., u{nvars-1} are each component of the vector u,
N = dim_global[0]*dim_global[1]*...*dim_global[ndims-1] is the total number of points,
and p = i0 + dim_global[0]*( i1 + dim_global[1]*( i2 + dim_global[2]*( ... + dim_global[ndims-2]*i{ndims-1} ))) (see _ArrayIndexnD_)
with i0, i1, i2, etc representing grid indices along each spatial dimension, i.e.,
0 <= i0 < dim_global[0]
0 <= i1 < dim_global[1]
...
0 <= i{ndims-1} < dim_global[ndims-1]
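To make the indexing concrete, here is a minimal sketch (illustrative only, not part of HyPar; the file name, grid sizes, and initial condition are made up) of a C program that writes a 2D, single-variable field in this ASCII layout:

#include <stdio.h>

#define NI 64   /* dim_global[0] */
#define NJ 32   /* dim_global[1] */

/* Illustrative only: write "initial.inp" in the ASCII layout described
   above for ndims = 2, nvars = 1. */
int main(void)
{
  double x[NI], y[NJ];
  int i, j;

  for (i = 0; i < NI; i++) x[i] = i * (1.0/NI);
  for (j = 0; j < NJ; j++) y[j] = j * (1.0/NJ);

  FILE *out = fopen("initial.inp", "w");
  if (!out) return 1;

  /* grid: all of x0 first, then all of x1 */
  for (i = 0; i < NI; i++) fprintf(out, "%1.16e ", x[i]);
  fprintf(out, "\n");
  for (j = 0; j < NJ; j++) fprintf(out, "%1.16e ", y[j]);
  fprintf(out, "\n");

  /* solution: points written in order of p = i + NI*j, i.e. the
     innermost index i varies fastest (see the formula for p above) */
  for (j = 0; j < NJ; j++) {
    for (i = 0; i < NI; i++) {
      double u = 1.0 + 0.1 * x[i] * y[j];  /* made-up initial condition */
      fprintf(out, "%1.16e ", u);
    }
  }
  fprintf(out, "\n");

  fclose(out);
  return 0;
}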


Binary format:
The input file should contain the binary data as follows:

x0_i (0 <= i < dim_global[0])
x1_i (0 <= i < dim_global[1])
...
x{ndims-1}_i (0 <= i < dim_global[ndims-1])
[u0,u1,...,u{nvars-1}]_p (0 <= p < N) (with no commas)

where
x0, x1, ..., x{ndims-1} represent the spatial dimensions (for a 3D problem, x0 = x, x1 = y, x2 = z),
u0, u1, ..., u{nvars-1} are each component of the vector u at a grid point,
N = dim_global[0]*dim_global[1]*...*dim_global[ndims-1] is the total number of points,
and p = i0 + dim_global[0]*( i1 + dim_global[1]*( i2 + dim_global[2]*( ... + dim_global[ndims-2]*i{ndims-1} ))) (see _ArrayIndexnD_)
with i0, i1, i2, etc representing grid indices along each spatial dimension, i.e.,
0 <= i0 < dim_global[0]
0 <= i1 < dim_global[1]
...
0 <= i{ndims-1} < dim_global[ndims-1]

For serial runs, this is the only input mode (of course!).
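The binary input is the same data written as raw doubles. A similarly hypothetical sketch for the same 2D, single-variable example follows (for nvars > 1, the components would be interleaved per point as [u0,...,u{nvars-1}]_p):

#include <stdio.h>

#define NI 64   /* dim_global[0] */
#define NJ 32   /* dim_global[1] */

/* Illustrative only: write "initial.inp" in the binary layout described
   above for ndims = 2, nvars = 1. */
int main(void)
{
  static double x[NI], y[NJ], u[NI*NJ];
  int i, j;

  for (i = 0; i < NI; i++) x[i] = i * (1.0/NI);
  for (j = 0; j < NJ; j++) y[j] = j * (1.0/NJ);
  for (j = 0; j < NJ; j++)
    for (i = 0; i < NI; i++)
      u[i + NI*j] = 1.0 + 0.1 * x[i] * y[j];   /* p = i + NI*j */

  FILE *out = fopen("initial.inp", "wb");
  if (!out) return 1;
  fwrite(x, sizeof(double), NI,    out);   /* grid along dimension 0 */
  fwrite(y, sizeof(double), NJ,    out);   /* grid along dimension 1 */
  fwrite(u, sizeof(double), NI*NJ, out);   /* solution (nvars = 1)   */
  fclose(out);
  return 0;
}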

Parameters
ndims        Number of spatial dimensions
nvars        Number of variables per grid point
dim_global   Integer array of size ndims with global grid size in each dimension
dim_local    Integer array of size ndims with local grid size in each dimension
ghosts       Number of ghost points
s            Solver object of type HyPar
m            MPI object of type MPIVariables
x            Grid associated with the array (can be NULL)
u            Array to hold the vector field being read
fname_root   Filename root
read_flag    Flag to indicate if the file was read

Definition at line 150 of file ReadArray.c.

163 {
164  HyPar *solver = (HyPar*) s;
165  MPIVariables *mpi = (MPIVariables*) m;
166  int i, d, ferr, index[ndims];
167  double *ug = NULL, *xg = NULL;
168  _DECLARE_IERR_;
169
170  *read_flag = 0;
171  /* Only root process reads from the file */
172  if (!mpi->rank) {
173 
174  if (!strcmp(solver->ip_file_type,"ascii")) {
175  char filename[_MAX_STRING_SIZE_];
176  strcpy(filename,fname_root);
177  strcat(filename,".inp");
178  FILE *in; in = fopen(filename,"r");
179  if (!in) *read_flag = 0;
180  else {
181  *read_flag = 1;
182  /* Reading from file */
183  printf("Reading array from ASCII file %s (Serial mode).\n",filename);
184  int size,offset;
185  /* allocate global solution array */
186  size = 1; for (d=0; d<ndims; d++) size *= dim_global[d]; size *= nvars;
187  ug = (double*) calloc(size,sizeof(double));
188  size = 0; for (d=0; d<ndims; d++) size += dim_global[d];
189  xg = (double*) calloc(size,sizeof(double));
190 
191  /* read grid */
192  offset = 0;
193  for (d = 0; d < ndims; d++) {
194  for (i = 0; i < dim_global[d]; i++) {
195  ferr = fscanf(in,"%lf",&xg[i+offset]);
196  if (ferr != 1) {
197  printf("Error in ReadArraySerial(): unable to read data. ferr=%d\n", ferr);
198  exit(1);
199  }
200  }
201  offset += dim_global[d];
202  }
203 
204  /* read solution */
205  for (i = 0; i < nvars; i++) {
206  int done = 0; _ArraySetValue_(index,ndims,0);
207  while (!done) {
208  int p; _ArrayIndex1D_(ndims,dim_global,index,0,p);
209  ferr = fscanf(in,"%lf",&ug[p*nvars+i]);
210  if (ferr != 1) {
211  printf("Error in ReadArraySerial(): unable to read data. ferr=%d\n", ferr);
212  exit(1);
213  }
214  _ArrayIncrementIndex_(ndims,dim_global,index,done);
215  }
216  }
217 
218  fclose(in);
219  }
220  } else if ((!strcmp(solver->ip_file_type,"bin")) || (!strcmp(solver->ip_file_type,"binary"))) {
221 
222  char filename[_MAX_STRING_SIZE_];
223  strcpy(filename,fname_root);
224  strcat(filename,".inp");
225  FILE *in; in = fopen(filename,"rb");
226  if (!in) *read_flag = 0;
227  else {
228  *read_flag = 1;
229  printf("Reading array from binary file %s (Serial mode).\n",filename);
230  size_t bytes;
231  int size;
232  /* allocate global solution array */
233  size = 1; for (d=0; d<ndims; d++) size *= dim_global[d]; size *= nvars;
234  ug = (double*) calloc(size,sizeof(double));
235  size = 0; for (d=0; d<ndims; d++) size += dim_global[d];
236  xg = (double*) calloc(size,sizeof(double));
237 
238  /* read grid */
239  size = 0;
240  for (d = 0; d < ndims; d++) size += dim_global[d];
241  bytes = fread(xg, sizeof(double), size, in);
242  if ((int)bytes != size) {
243  fprintf(stderr,"Error in ReadArray(): Unable to read grid. Expected %d, Read %d.\n",
244  size, (int)bytes);
245  }
246 
247  /* read solution */
248  size = 1;
249  for (d = 0; d < ndims; d++) size *= dim_global[d]; size *= nvars;
250  bytes = fread(ug, sizeof(double), size, in);
251  if ((int)bytes != size) {
252  fprintf(stderr,"Error in ReadArray(): Unable to read solution. Expected %d, Read %d.\n",
253  size, (int)bytes);
254  }
255 
256  fclose(in);
257  }
258 
259  }
260  }
261 
262  /* Broadcast read_flag to all processes */
263  IERR MPIBroadcast_integer(read_flag,1,0,&mpi->world); CHECKERR(ierr);
264 
265  if (*read_flag) {
266 
267  /* partition global array to all processes */
268  IERR MPIPartitionArraynD(ndims,mpi,(mpi->rank?NULL:ug),u,dim_global,
269  dim_local,ghosts,nvars); CHECKERR(ierr);
270 
271  if (x) {
272  /* partition x vector across the processes */
273  int offset_global = 0, offset_local = 0;
274  for (d=0; d<ndims; d++) {
275  IERR MPIPartitionArray1D(mpi,(mpi->rank?NULL:&xg[offset_global]),
276  &x[offset_local+ghosts],
277  mpi->is[d],mpi->ie[d],dim_local[d],0); CHECKERR(ierr);
278  offset_global += dim_global[d];
279  offset_local += dim_local [d] + 2*ghosts;
280  }
281  }
282 
283  /* free global arrays */
284  if (!mpi->rank) {
285  free(ug);
286  free(xg);
287  }
288  }
289 
290  return(0);
291 }
int ReadArrayParallel ( int  ndims,
int  nvars,
int *  dim_global,
int *  dim_local,
int  ghosts,
void *  s,
void *  m,
double *  x,
double *  u,
char *  fname_root,
int *  read_flag 
)
static

Read in a vector field in a parallel fashion: the number of MPI ranks participating in file I/O is specified as an input (MPIVariables::N_IORanks). All the MPI ranks are divided into that many I/O groups, with one rank in each group acting as the "leader" that does the file reading and writing. To read in the solution, the leader of an I/O group reads its own file and distributes the solution to the processors in its group. The number of I/O groups is typically chosen as the number of I/O nodes available on the HPC platform, given the number of compute nodes the code is running on. This is a good balance between having all processors read serially from the same file and having as many files (each with a local solution) as there are processors. This approach has been observed to be very scalable (up to ~100,000 - 1,000,000 processors).

  • Supports only binary format.

There should be as many files as the number of IO ranks (MPIVariables::N_IORanks). The files should be named <fname_root>_par.inp.<nnnn>, where <nnnn> is the string of format "%04d" corresponding to the integer n, 0 <= n < MPIVariables::N_IORanks.
Each file should contain the following data:
{
x0_i (0 <= i < dim_local[0])
x1_i (0 <= i < dim_local[1])
...
x{ndims-1}_i (0 <= i < dim_local[ndims-1])
[u0,u1,...,u{nvars-1}]_p (0 <= p < N) (with no commas)

where
x0, x1, ..., x{ndims-1} represent the spatial dimensions (for a 3D problem, x0 = x, x1 = y, x2 = z),
u0, u1, ..., u{nvars-1} are each component of the vector u at a grid point,
N = dim_local[0]*dim_local[1]*...*dim_local[ndims-1] is the total number of points,
and p = i0 + dim_local[0]*( i1 + dim_local[1]*( i2 + dim_local[2]*( ... + dim_local[ndims-2]*i{ndims-1} ))) (see _ArrayIndexnD_)
with i0, i1, i2, etc representing grid indices along each spatial dimension, i.e.,
0 <= i0 < dim_local[0]
0 <= i1 < dim_local[1]
...
0 <= i{ndims-1} < dim_local[ndims-1]
}
for each rank in the IO group corresponding to the file being read.

  • The above block represents the local grid and vector field for each rank
  • Each file should contain as many such blocks of data as there are members in the corresponding IO group.
  • The ranks that belong to a particular IO group are given as p, where MPIVariables::GroupStartRank <= p < MPIVariables::GroupEndRank
  • The code Extras/ParallelInput.c can generate the files <fname_root>_par.inp.<nnnn> from the file <fname_root>.inp that is read by ReadArraySerial() if input_mode in the input file "solver.inp" is set to "parallel n" where n is the number of IO ranks.
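As a rough illustration of the grouping and file naming described above, here is a hedged sketch that assumes an even split of ranks into I/O groups; it is not HyPar's actual MPIGetFilename/grouping code:

#include <stdio.h>

/* Hypothetical sketch: map an MPI rank to its I/O group and build the
   name of the file <fname_root>_par.inp.<nnnn> that the group's leader
   reads. Assumes nproc is evenly divisible by N_IORanks; "filename"
   must be large enough to hold the result. */
void io_group_and_filename(int rank, int nproc, int N_IORanks,
                           const char *fname_root, char *filename)
{
  int group_size = nproc / N_IORanks;   /* ranks per I/O group         */
  int group      = rank / group_size;   /* 0 <= group < N_IORanks      */
  int leader     = group * group_size;  /* rank that does the file I/O */

  sprintf(filename, "%s_par.inp.%04d", fname_root, group);

  if (rank == leader) {
    /* this rank would open "filename", read its own block first, and then
       read and send one block to each of the other ranks in its group */
  }
}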
Parameters
ndims        Number of spatial dimensions
nvars        Number of variables per grid point
dim_global   Integer array of size ndims with global grid size in each dimension
dim_local    Integer array of size ndims with local grid size in each dimension
ghosts       Number of ghost points
s            Solver object of type HyPar
m            MPI object of type MPIVariables
x            Grid associated with the array (can be NULL)
u            Array to hold the vector field being read
fname_root   Filename root
read_flag    Flag to indicate if file was read

Definition at line 341 of file ReadArray.c.

354 {
355  HyPar *solver = (HyPar*) s;
356  MPIVariables *mpi = (MPIVariables*) m;
357  int proc,d;
358  _DECLARE_IERR_;
359
360  *read_flag = 1;
361  char filename_root[_MAX_STRING_SIZE_];
362  strcpy(filename_root,fname_root);
363  strcat(filename_root,"_par.inp");
364 
365  /* check for existence of the file */
366  if (mpi->IOParticipant) {
367  FILE *in;
368  char filename[_MAX_STRING_SIZE_];
369  MPIGetFilename(filename_root,&mpi->IOWorld,filename);
370  in = fopen(filename,"rb");
371  if (!in) *read_flag = 0;
372  else {
373  *read_flag = 1;
374  fclose(in);
375  }
376  }
377  IERR MPIMin_integer(read_flag,read_flag,1,&mpi->world);
378 
379  if (*read_flag) {
380 
381  if (!mpi->rank) printf("Reading from binary file %s.xxx (parallel mode).\n",filename_root);
382 
383  /* calculate size of the local grid on this rank */
384  int sizex = 0; for (d=0; d<ndims; d++) sizex += dim_local[d];
385  int sizeu = nvars; for (d=0; d<ndims; d++) sizeu *= dim_local[d];
386 
387  /* allocate buffer arrays to read in grid and solution */
388  double *buffer = (double*) calloc (sizex+sizeu, sizeof(double));
389 
390  if (mpi->IOParticipant) {
391 
392  /* if this rank is responsible for file I/O */
393  double *read_buffer = NULL;
394  int read_size_x, read_size_u, read_total_size;
395  int is[ndims], ie[ndims];
396 
397  /* open the file */
398  FILE *in;
399  int bytes;
400  char filename[_MAX_STRING_SIZE_];
401  MPIGetFilename(filename_root,&mpi->IOWorld,filename);
402 
403  in = fopen(filename,"rb");
404  if (!in) {
405  fprintf(stderr,"Error in ReadArrayParallel(): File %s could not be opened.\n",filename);
406  return(1);
407  }
408 
409  /* Read own data */
410  bytes = fread(buffer,sizeof(double),(sizex+sizeu),in);
411  if (bytes != (sizex+sizeu)) {
412  fprintf(stderr,"Error in ReadArrayParallel(): File %s contains insufficient data.\n",filename);
413  return(1);
414  }
415 
416  /* read and send the data for the other processors in this IO rank's group */
417  for (proc=mpi->GroupStartRank+1; proc<mpi->GroupEndRank; proc++) {
418  /* get the local domain limits for process proc */
419  IERR MPILocalDomainLimits(ndims,proc,mpi,dim_global,is,ie);
420  /* calculate the size of its local data and allocate read buffer */
421  read_size_x = 0; for (d=0; d<ndims; d++) read_size_x += (ie[d]-is[d]);
422  read_size_u = nvars; for (d=0; d<ndims; d++) read_size_u *= (ie[d]-is[d]);
423  read_total_size = read_size_x + read_size_u;
424  read_buffer = (double*) calloc (read_total_size, sizeof(double));
425  /* read the data */
426  bytes = fread(read_buffer,sizeof(double),read_total_size,in);
427  if (bytes != read_total_size) {
428  fprintf(stderr,"Error in ReadArrayParallel(): File %s contains insufficient data.\n",filename);
429  return(1);
430  }
431  /* send the data */
432  MPI_Request req = MPI_REQUEST_NULL;
433  MPI_Isend(read_buffer,read_total_size,MPI_DOUBLE,proc,1100,mpi->world,&req);
434  MPI_Wait(&req,MPI_STATUS_IGNORE);
435  free(read_buffer);
436  }
437 
438  /* close the file */
439  fclose(in);
440 
441  } else {
442 
443  /* all other processes, just receive the data from
444  * the rank responsible for file I/O */
445  MPI_Request req = MPI_REQUEST_NULL;
446  MPI_Irecv(buffer,(sizex+sizeu),MPI_DOUBLE,mpi->IORank,1100,mpi->world,&req);
447  MPI_Wait(&req,MPI_STATUS_IGNORE);
448 
449  }
450 
451  /* copy the grid */
452  if (x) {
453  int offset1 = 0, offset2 = 0;
454  for (d = 0; d < ndims; d++) {
455  _ArrayCopy1D_((buffer+offset2),(x+offset1+ghosts),dim_local[d]);
456  offset1 += (dim_local[d]+2*ghosts);
457  offset2 += dim_local[d];
458  }
459  }
460 
461  /* copy the solution */
462  int index[ndims];
463  IERR ArrayCopynD(ndims,(buffer+sizex),u,dim_local,0,ghosts,index,nvars);
464  CHECKERR(ierr);
465 
466  /* free buffers */
467  free(buffer);
468  }
469 
470  return(0);
471 
472 }
int ReadArrayMPI_IO ( int  ndims,
int  nvars,
int *  dim_global,
int *  dim_local,
int  ghosts,
void *  s,
void *  m,
double *  x,
double *  u,
char *  fname_root,
int *  read_flag 
)
static

Read in an array in a parallel fashion using MPI-IO: Similar to ReadArrayParallel(), except that the I/O leaders read from the same file using the MPI I/O routines, by calculating their respective offsets and reading the correct chunk of data from that offset. The MPI-IO functions (part of MPICH) are constantly being developed to be scalable on the latest and greatest HPC platforms.

  • Supports only binary format.

There should be one file, named <fname_root>_mpi.inp, containing the following data:
{
x0_i (0 <= i < dim_local[0])
x1_i (0 <= i < dim_local[1])
...
x{ndims-1}_i (0 <= i < dim_local[ndims-1])
[u0,u1,...,u{nvars-1}]_p (0 <= p < N) (with no commas)

where
x0, x1, ..., x{ndims-1} represent the spatial dimensions (for a 3D problem, x0 = x, x1 = y, x2 = z),
u0, u1, ..., u{nvars-1} are each component of the vector u at a grid point,
N = dim_local[0]*dim_local[1]*...*dim_local[ndims-1] is the total number of points,
and p = i0 + dim_local[0]*( i1 + dim_local[1]*( i2 + dim_local[2]*( ... + dim_local[ndims-2]*i{ndims-1} ))) (see _ArrayIndexnD_)
with i0, i1, i2, etc representing grid indices along each spatial dimension, i.e.,
0 <= i0 < dim_local[0]
0 <= i1 < dim_local[1]
...
0 <= i{ndims-1} < dim_local[ndims-1]
}
for each rank, in the order of rank number (0 to nproc-1).

  • The above block represents the local grid and vector field for each rank
  • The file should contain as many such blocks of data as there are MPI ranks.
  • Each IO rank computes its offset to figure out where the data it's supposed to read exists in the file, and then reads it.
  • The code Extras/MPIInput.c can generate the file <fname_root>_mpi.inp from the file <fname_root>.inp that is read by ReadArraySerial() if input_mode in the input file "solver.inp" is set to "mpi-io n" where n is the number of IO ranks.
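The offset-based read itself can be sketched with standard MPI-IO calls. The following is a minimal, hypothetical fragment, not the actual HyPar routine; error handling is omitted and the offset is counted in doubles:

#include <mpi.h>

/* Hypothetical sketch of the pattern described above: an I/O rank has
   computed "offset" (the number of doubles occupied by all lower-numbered
   ranks' grid + solution blocks) and reads its own "count" doubles from
   the shared file. */
int read_chunk(MPI_Comm io_comm, const char *filename,
               long long offset, int count, double *buf)
{
  MPI_File   fh;
  MPI_Status status;

  if (MPI_File_open(io_comm, (char*)filename, MPI_MODE_RDONLY,
                    MPI_INFO_NULL, &fh) != MPI_SUCCESS) return 1;

  /* convert the offset to bytes and read "count" doubles from there */
  MPI_File_read_at(fh, (MPI_Offset)offset * (MPI_Offset)sizeof(double),
                   buf, count, MPI_DOUBLE, &status);

  MPI_File_close(&fh);
  return 0;
}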
Parameters
ndims        Number of spatial dimensions
nvars        Number of variables per grid point
dim_global   Integer array of size ndims with global grid size in each dimension
dim_local    Integer array of size ndims with local grid size in each dimension
ghosts       Number of ghost points
s            Solver object of type HyPar
m            MPI object of type MPIVariables
x            Grid associated with the array (can be NULL)
u            Array to hold the vector field being read
fname_root   Filename root
read_flag    Flag to indicate if file was read

Definition at line 513 of file ReadArray.c.

526 {
527  HyPar *solver = (HyPar*) s;
528  MPIVariables *mpi = (MPIVariables*) m;
529  int proc,d;
530  _DECLARE_IERR_;
531
532  *read_flag = 0;
533  char filename[_MAX_STRING_SIZE_];
534  strcpy(filename,fname_root);
535  strcat(filename,"_mpi.inp");
536 
537  /* check for existence of file */
538  if (!mpi->rank) {
539  FILE *in;
540  in = fopen(filename,"rb");
541  if (!in) *read_flag = 0;
542  else {
543  *read_flag = 1;
544  fclose(in);
545  }
546  }
547  IERR MPIBroadcast_integer(read_flag,1,0,&mpi->world);
548 
549  if (*read_flag) {
550 
551  if (!mpi->rank) printf("Reading from binary file %s (MPI-IO mode).\n",filename);
552 
553  /* calculate size of the local grid on this rank */
554  int sizex = 0; for (d=0; d<ndims; d++) sizex += dim_local[d];
555  int sizeu = nvars; for (d=0; d<ndims; d++) sizeu *= dim_local[d];
556 
557  /* allocate buffer arrays to read in grid and solution */
558  double *buffer = (double*) calloc (sizex+sizeu, sizeof(double));
559 
560  if (mpi->IOParticipant) {
561 
562  /* if this rank is responsible for file I/O */
563  double *read_buffer = NULL;
564  int read_size_x, read_size_u, read_total_size;
565  int is[ndims], ie[ndims], size;
566 
567  /* calculate offset */
568  long long offset = 0;
569  for (proc=0; proc < mpi->rank; proc++) {
570  /* get the local domain limits for process proc */
571  IERR MPILocalDomainLimits(ndims,proc,mpi,dim_global,is,ie);
572  /* calculate the size of its local grid */
573  size = 0; for (d=0; d<ndims; d++) size += (ie[d]-is[d]);
574  offset += size;
575  /* calculate the size of the local solution */
576  size = nvars; for (d=0; d<ndims; d++) size *= (ie[d]-is[d]);
577  offset += size;
578  }
579 
580  /* open the file */
581  MPI_Status status;
582  MPI_File in;
583  int error;
584  error = MPI_File_open(mpi->IOWorld,filename,MPI_MODE_RDONLY,MPI_INFO_NULL,&in);
585  if (error != MPI_SUCCESS) {
586  fprintf(stderr,"Error in ReadArrayMPI_IO(): Unable to open %s.\n",filename);
587  return(1);
588  }
589 
590  /* set offset */
591  MPI_Offset FileOffset = (MPI_Offset) (offset * sizeof(double));
592  MPI_File_seek(in,FileOffset,MPI_SEEK_SET);
593 
594  /* Read own data */
595  MPI_File_read(in,buffer,(sizex+sizeu)*sizeof(double),MPI_BYTE,&status);
596 
597  /* read and send the data for the other processors in this IO rank's group */
598  for (proc=mpi->GroupStartRank+1; proc<mpi->GroupEndRank; proc++) {
599  /* get the local domain limits for process proc */
600  IERR MPILocalDomainLimits(ndims,proc,mpi,dim_global,is,ie);
601  /* calculate the size of its local data and allocate read buffer */
602  read_size_x = 0; for (d=0; d<ndims; d++) read_size_x += (ie[d]-is[d]);
603  read_size_u = nvars; for (d=0; d<ndims; d++) read_size_u *= (ie[d]-is[d]);
604  read_total_size = read_size_x + read_size_u;
605  read_buffer = (double*) calloc (read_total_size, sizeof(double));
606  /* read the data */
607  MPI_File_read(in,read_buffer,read_total_size*sizeof(double),MPI_BYTE,&status);
608  /* send the data */
609  MPI_Request req = MPI_REQUEST_NULL;
610  MPI_Isend(read_buffer,read_total_size,MPI_DOUBLE,proc,1100,mpi->world,&req);
611  MPI_Wait(&req,MPI_STATUS_IGNORE);
612  free(read_buffer);
613  }
614 
615  /* close the file */
616  MPI_File_close(&in);
617 
618  } else {
619 
620  /* all other processes, just receive the data from
621  * the rank responsible for file I/O */
622  MPI_Request req = MPI_REQUEST_NULL;
623  MPI_Irecv(buffer,(sizex+sizeu),MPI_DOUBLE,mpi->IORank,1100,mpi->world,&req);
624  MPI_Wait(&req,MPI_STATUS_IGNORE);
625 
626  }
627 
628  /* copy the grid */
629  if (x) {
630  int offset1 = 0, offset2 = 0;
631  for (d = 0; d < ndims; d++) {
632  _ArrayCopy1D_((buffer+offset2),(x+offset1+ghosts),dim_local[d]);
633  offset1 += (dim_local[d]+2*ghosts);
634  offset2 += dim_local[d];
635  }
636  }
637 
638  /* copy the solution */
639  int index[ndims];
640  IERR ArrayCopynD(ndims,(buffer+sizex),u,dim_local,0,ghosts,index,nvars);
641  CHECKERR(ierr);
642 
643  /* free buffers */
644  free(buffer);
645  }
646 
647  return(0);
648 
649 }
int ReadArray ( int  ndims,
int  nvars,
int *  dim_global,
int *  dim_local,
int  ghosts,
void *  s,
void *  m,
double *  x,
double *  u,
char *  fname_root,
int *  read_flag 
)

Read in a vector field from file: wrapper function that calls the appropriate function depending on input mode (HyPar::input_mode).

The mode and type of input are specified through HyPar::input_mode and HyPar::ip_file_type. A vector field is read from file and stored in an array.
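For reference, both settings come from HyPar's solver.inp; an illustrative fragment (other solver keywords omitted, and assuming the usual begin/end keyword-value layout) could be:

begin
    ip_file_type    binary
    input_mode      parallel 4
end

With these values, ReadArray() would dispatch to ReadArrayParallel() and expect 4 files, <fname_root>_par.inp.0000 through <fname_root>_par.inp.0003. Setting input_mode to serial dispatches to ReadArraySerial() (the only mode that supports ASCII input files), and "mpi-io n" dispatches to ReadArrayMPI_IO().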

Parameters
ndims        Number of spatial dimensions
nvars        Number of variables per grid point
dim_global   Integer array of size ndims with global grid size in each dimension
dim_local    Integer array of size ndims with local grid size in each dimension
ghosts       Number of ghost points
s            Solver object of type HyPar
m            MPI object of type MPIVariables
x            Grid associated with the array (can be NULL)
u            Array to hold the vector field
fname_root   Filename root
read_flag    Flag to indicate if the file was read

Definition at line 25 of file ReadArray.c.

38 {
39  HyPar *solver = (HyPar*) s;
40  MPIVariables *mpi = (MPIVariables*) m;
41  _DECLARE_IERR_;
42
43  if (!strcmp(solver->input_mode,"serial")) {
44  IERR ReadArraySerial(ndims,nvars,dim_global,dim_local,ghosts,s,m,x,u,fname_root,read_flag);
45  CHECKERR(ierr);
46 #ifndef serial
47  } else if (!strcmp(solver->input_mode,"parallel")) {
48  ReadArrayParallel(ndims,nvars,dim_global,dim_local,ghosts,s,m,x,u,fname_root,read_flag);
49  CHECKERR(ierr);
50  } else if (!strcmp(solver->input_mode,"mpi-io" )) {
51  ReadArrayMPI_IO(ndims,nvars,dim_global,dim_local,ghosts,s,m,x,u,fname_root,read_flag);
52  CHECKERR(ierr);
53 #endif
54  } else {
55  fprintf(stderr,"Error: Illegal value (%s) for input_mode.\n",solver->input_mode);
56  return(1);
57  }
58 
59  if (x) {
60  int offset, d;
61  /* exchange MPI-boundary values of x between processors */
62  offset = 0;
63  for (d = 0; d < ndims; d++) {
64  IERR MPIExchangeBoundaries1D(mpi,&x[offset],dim_local[d],
65  ghosts,d,ndims); CHECKERR(ierr);
66  offset += (dim_local [d] + 2*ghosts);
67  }
68  /* fill in ghost values of x at physical boundaries by extrapolation */
69  offset = 0;
70  for (d = 0; d < ndims; d++) {
71  double *X = &x[offset];
72  int *dim = dim_local, i;
73  if (mpi->ip[d] == 0) {
74  /* fill left boundary along this dimension */
75  for (i = 0; i < ghosts; i++) {
76  int delta = ghosts - i;
77  X[i] = X[ghosts] + ((double) delta) * (X[ghosts]-X[ghosts+1]);
78  }
79  }
80  if (mpi->ip[d] == mpi->iproc[d]-1) {
81  /* fill right boundary along this dimension */
82  for (i = dim[d]+ghosts; i < dim[d]+2*ghosts; i++) {
83  int delta = i - (dim[d]+ghosts-1);
84  X[i] = X[dim[d]+ghosts-1]
85  + ((double) delta) * (X[dim[d]+ghosts-1]-X[dim[d]+ghosts-2]);
86  }
87  }
88  offset += (dim[d] + 2*ghosts);
89  }
90  }
91 
92  return(0);
93 }