HyPar  1.0
Finite-Difference Hyperbolic-Parabolic PDE Solver on Cartesian Grids
WriteArray.c File Reference

Write a vector field, stored as an array, to file. More...

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <basic.h>
#include <arrayfunctions.h>
#include <mpivars.h>
#include <hypar.h>

Go to the source code of this file.

Functions

static int WriteArraySerial (int, int, int *, int *, int, double *, double *, void *, void *, char *)
 
static int WriteArrayParallel (int, int, int *, int *, int, double *, double *, void *, void *, char *)
 
int WriteArray (int ndims, int nvars, int *dim_global, int *dim_local, int ghosts, double *x, double *u, void *s, void *m, char *fname_root)
 

Detailed Description

Write a vector field, stored as an array, to file.

Author
Debojyoti Ghosh

Contains functions to write out a vector field, stored as an array, to a file.

Definition in file WriteArray.c.

Function Documentation

◆ WriteArraySerial()

int WriteArraySerial ( int  ndims,
int  nvars,
int *  dim_global,
int *  dim_local,
int  ghosts,
double *  x,
double *  u,
void *  s,
void *  m,
char *  fname_root 
)
static

Function to write out a vector field, stored as an array, and its associated Cartesian grid to a file in serial mode. The entire global domain is allocated and assembled on rank 0, so do not use this mode for large problems whose global domain will not fit in the memory of a single node; it is also not very scalable.
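
As a rough guide to when serial output becomes impractical, the sketch below (a hypothetical helper, not part of HyPar) estimates the memory rank 0 must allocate for the gathered global arrays; it mirrors the allocations of ug and xg in the listing further down.

    #include <stddef.h>

    /* Hypothetical helper (not part of HyPar): bytes needed on rank 0 for the
       gathered global solution (product of dim_global, times nvars, doubles)
       and the gathered grid (sum of dim_global doubles). */
    size_t serial_output_bytes(int ndims, int nvars, const int *dim_global)
    {
      size_t npoints = 1, ngrid = 0;
      int d;
      for (d = 0; d < ndims; d++) {
        npoints *= (size_t) dim_global[d];  /* solution points */
        ngrid   += (size_t) dim_global[d];  /* 1D grid lines, one per dimension */
      }
      return (npoints * (size_t) nvars + ngrid) * sizeof(double);
    }

For example, a 1024 x 1024 x 1024 grid with 5 variables per point requires roughly 40 GB on rank 0, which is why this mode is discouraged for large problems.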

See also
WriteBinary(), WriteText(), WriteTecplot2D(), WriteTecplot3D()
Parameters
ndims        Number of spatial dimensions
nvars        Number of variables per grid point
dim_global   Integer array of size ndims with global grid size in each dimension
dim_local    Integer array of size ndims with local grid size in each dimension
ghosts       Number of ghost points
x            Array of spatial coordinates (i.e. the grid)
u            Vector field to write
s            Solver object of type HyPar
m            MPI object of type MPIVariables
fname_root   Filename root (extension is added automatically). For unsteady output, a numerical index is added that is the same as for the solution output files.

Definition at line 74 of file WriteArray.c.

87 {
88  HyPar *solver = (HyPar*) s;
89  MPIVariables *mpi = (MPIVariables*)m;
90  int d;
91  _DECLARE_IERR_;
92 
93  /* root process: allocate global output arrays */
94  double *ug, *xg;
95  if (!mpi->rank) {
96  int size_global;
97 
98  size_global = 1;
99  for (d=0; d<ndims; d++) size_global *= dim_global[d];
100  ug = (double*) calloc (size_global*nvars,sizeof(double));
101  _ArraySetValue_(ug,size_global*nvars,0.0);
102 
103  size_global = 0;
104  for (d=0; d<ndims; d++) size_global += dim_global[d];
105  xg = (double*) calloc (size_global,sizeof(double));
106  _ArraySetValue_(xg,size_global,0.0); CHECKERR(ierr);
107 
108  } else {
109 
110  /* null pointers on non-root processes */
111  ug = xg = NULL;
112 
113  }
114 
115  /* Assemble the local output arrays into the global output arrays */
116  IERR MPIGatherArraynD(ndims,mpi,ug,u,dim_global,dim_local,
117  ghosts,nvars); CHECKERR(ierr);
118  int offset_global, offset_local;
119  offset_global = offset_local = 0;
120  for (d=0; d<ndims; d++) {
121  IERR MPIGatherArray1D(mpi,(mpi->rank?NULL:&xg[offset_global]),
122  &x[offset_local+ghosts],
123  mpi->is[d],mpi->ie[d],dim_local[d],0); CHECKERR(ierr);
124  offset_global += dim_global[d];
125  offset_local += dim_local [d] + 2*ghosts;
126  }
127 
128  if (!mpi->rank) {
129  /* write output file to disk */
130  char filename[_MAX_STRING_SIZE_] = "";
131  strcat(filename,fname_root);
132  if (!strcmp(solver->op_overwrite,"no")) {
133  strcat(filename,"_");
134  strcat(filename,solver->filename_index);
135  }
136  strcat(filename,solver->solnfilename_extn);
137  printf("Writing solution file %s.\n",filename);
138  IERR solver->WriteOutput(ndims,nvars,dim_global,xg,ug,filename,
139  solver->index); CHECKERR(ierr);
140 
141  /* Clean up output arrays */
142  free(xg);
143  free(ug);
144  }
145 
146  return(0);
147 }
References IERR (basic.h:16), CHECKERR (basic.h:18), _DECLARE_IERR_ (basic.h:17), _MAX_STRING_SIZE_ (basic.h:14), _ArraySetValue_, MPIGatherArraynD(), MPIGatherArray1D(), HyPar::filename_index (hypar.h:197), HyPar::op_overwrite (hypar.h:191), HyPar::index (hypar.h:102), HyPar::solnfilename_extn (hypar.h:201), HyPar::WriteOutput (hypar.h:211), the HyPar structure (hypar.h:23), and the MPIVariables structure.

◆ WriteArrayParallel()

int WriteArrayParallel ( int  ndims,
int  nvars,
int *  dim_global,
int *  dim_local,
int  ghosts,
double *  x,
double *  u,
void *  s,
void *  m,
char *  fname_root 
)
static

Write a vector field, stored as an array, and its associated Cartesian grid to a file in parallel. All the MPI ranks are divided into IO groups, each with a group leader that writes out the data for the ranks in its group. The number of groups (and thus, the number of ranks participating in file I/O) is given by MPIVariables::N_IORanks. The group leader receives the local data from each rank in its group, and writes it out to the corresponding file.
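
As background, the sketch below shows one generic way such I/O groups can be formed with MPI_Comm_split; it is an illustration only, not HyPar's actual setup (which is done in its MPI initialization code), and the function name form_io_groups is hypothetical.

    #include <mpi.h>

    /* Illustrative sketch (not HyPar's implementation): split the ranks of
       "world" into N_IORanks groups of contiguous ranks; the rank that is 0
       within its group communicator acts as that group's I/O leader. */
    void form_io_groups(MPI_Comm world, int N_IORanks,
                        MPI_Comm *group_comm, int *is_leader)
    {
      int rank, nproc;
      MPI_Comm_rank(world, &rank);
      MPI_Comm_size(world, &nproc);

      int group_size = nproc / N_IORanks;   /* assumes nproc divisible by N_IORanks */
      int group      = rank / group_size;   /* contiguous blocks of ranks per group */

      MPI_Comm_split(world, group, rank, group_comm);

      int group_rank;
      MPI_Comm_rank(*group_comm, &group_rank);
      *is_leader = (group_rank == 0);       /* the leader performs the file I/O */
    }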

  • The data is written in binary format only.
  • The number of files written equals the number of IO groups (MPIVariables::N_IORanks); the files are named <fname_root>.bin.nnnn, where "nnnn" is a string of format "%04d" representing n, 0 <= n < MPIVariables::N_IORanks.
  • Each file contains the following blocks of data:
    {
    x0_i (0 <= i < dim_local[0])
    x1_i (0 <= i < dim_local[1])
    ...
    x{ndims-1}_i (0 <= i < dim_local[ndims-1])
    [u0,u1,...,u{nvars-1}]_p (0 <= p < N) (with no commas)

    where
    x0, x1, ..., x{ndims-1} represent the spatial dimensions (for a 3D problem, x0 = x, x1 = y, x2 = z),
    u0, u1, ..., u{nvars-1} are each component of the vector u at a grid point,
    N = dim_local[0]*dim_local[1]*...*dim_local[ndims-1] is the total number of points,
    and p = i0 + dim_local[0]*( i1 + dim_local[1]*( i2 + dim_local[2]*( ... + i{ndims-1} )))
    with i0, i1, i2, etc. representing grid indices along each spatial dimension, i.e.,
    0 <= i0 < dim_local[0]
    0 <= i1 < dim_local[1]
    ...
    0 <= i{ndims-1} < dim_local[ndims-1]
    }
    for each rank in the corresponding group (i.e., each file contains MPIVariables::nproc / MPIVariables::N_IORanks such blocks).
  • Stitching the local data in these files together into the global solution, and writing that out to a binary file, can be done with Extras/ParallelOutput.c (a minimal single-block reader sketch is given after this description).
  • If HyPar::op_overwrite is set to "no", the vector field at the various simulation times is appended to each of the <fname_root>.bin.nnnn files. The code Extras/ParallelOutput.c takes care of writing the global solution at each simulation time to a different binary file (the same files that WriteArraySerial() would have written out if serial file output had been chosen).

This approach has been observed to be very scalable (with up to ~500,000 MPI ranks). The number of MPI ranks participating in file I/O (MPIVariables::N_IORanks) should be set to the number of I/O nodes available on an HPC platform, given the number of compute nodes the simulation is running on.
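
To make the block layout described above concrete, here is a minimal sketch of reading a single rank's block back from one <fname_root>.bin.nnnn file. It assumes ndims, nvars, and the writing rank's dim_local[] are known in advance; the function name read_one_block is illustrative and not part of HyPar (Extras/ParallelOutput.c is the supported post-processing tool).

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative sketch, not part of HyPar: read one rank's block (grid
       coordinates followed by the interleaved solution vector) from a file
       written by WriteArrayParallel(). Returns 0 on success. */
    int read_one_block(FILE *in, int ndims, int nvars, const int *dim_local,
                       double **x_out, double **u_out)
    {
      size_t sizex = 0, npoints = 1;
      int d;
      for (d = 0; d < ndims; d++) {
        sizex   += (size_t) dim_local[d];   /* x0 values, then x1 values, ... */
        npoints *= (size_t) dim_local[d];
      }
      size_t sizeu = npoints * (size_t) nvars;

      double *x = (double*) malloc(sizex * sizeof(double));
      double *u = (double*) malloc(sizeu * sizeof(double));
      if (!x || !u) { free(x); free(u); return 1; }

      /* grid first, then [u0,...,u{nvars-1}] at p = i0 + dim_local[0]*(i1 + ...) */
      if (fread(x, sizeof(double), sizex, in) != sizex) { free(x); free(u); return 1; }
      if (fread(u, sizeof(double), sizeu, in) != sizeu) { free(x); free(u); return 1; }

      *x_out = x; *u_out = u;
      return 0;
    }

Reading nproc/N_IORanks such blocks in sequence from one file recovers the data of every rank in that file's I/O group, in increasing rank order.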

Parameters
ndims        Number of spatial dimensions
nvars        Number of variables per grid point
dim_global   Integer array of size ndims with global grid size in each dimension
dim_local    Integer array of size ndims with local grid size in each dimension
ghosts       Number of ghost points
x            Array of spatial coordinates (i.e. the grid)
u            Vector field to write
s            Solver object of type HyPar
m            MPI object of type MPIVariables
fname_root   Filename root (extension is added automatically). For unsteady output, a numerical index is added that is the same as for the solution output files.

Definition at line 194 of file WriteArray.c.

207 {
208  HyPar *solver = (HyPar*) s;
209  MPIVariables *mpi = (MPIVariables*) m;
210  int proc,d;
211  _DECLARE_IERR_;
212 
213  static int count = 0;
214 
215  char filename_root[_MAX_STRING_SIZE_];
216  strcpy(filename_root,fname_root);
217  strcat(filename_root,solver->solnfilename_extn);
218  if (!mpi->rank) printf("Writing solution file %s.xxxx (parallel mode).\n",filename_root);
219 
220  /* calculate size of the local grid on this rank */
221  int sizex = 0; for (d=0; d<ndims; d++) sizex += dim_local[d];
222  int sizeu = nvars; for (d=0; d<ndims; d++) sizeu *= dim_local[d];
223 
224  /* allocate buffer arrays to write grid and solution */
225  double *buffer = (double*) calloc (sizex+sizeu, sizeof(double));
226 
227  /* copy the grid to buffer */
228  int offset1 = 0, offset2 = 0;
229  for (d = 0; d < ndims; d++) {
230  _ArrayCopy1D_((x+offset1+ghosts),(buffer+offset2),dim_local[d]);
231  offset1 += (dim_local[d]+2*ghosts);
232  offset2 += dim_local[d];
233  }
234 
235  /* copy the solution */
236  int index[ndims];
237  IERR ArrayCopynD(ndims,u,(buffer+sizex),dim_local,ghosts,0,index,nvars); CHECKERR(ierr);
238 
239  if (mpi->IOParticipant) {
240 
241  /* if this rank is responsible for file I/O */
242  double *write_buffer = NULL;
243  int write_size_x, write_size_u, write_total_size;
244  int is[ndims], ie[ndims];
245 
246  /* open the file */
247  FILE *out;
248  int bytes;
249  char filename[_MAX_STRING_SIZE_];
250  MPIGetFilename(filename_root,&mpi->IOWorld,filename);
251 
252  if (!strcmp(solver->op_overwrite,"no")) {
253  if ((!count) && (!solver->restart_iter)) {
254  /* open a new file, since this function is being called the first time
255  and this is not a restart run*/
256  out = fopen(filename,"wb");
257  if (!out) {
258  fprintf(stderr,"Error in WriteArrayParallel(): File %s could not be opened for writing.\n",filename);
259  return(1);
260  }
261  } else {
262  /* append to existing file */
263  out = fopen(filename,"ab");
264  if (!out) {
265  fprintf(stderr,"Error in WriteArrayParallel(): File %s could not be opened for appending.\n",filename);
266  return(1);
267  }
268  }
269  } else {
270  /* write a new file / overwrite existing file */
271  out = fopen(filename,"wb");
272  if (!out) {
273  fprintf(stderr,"Error in WriteArrayParallel(): File %s could not be opened for writing.\n",filename);
274  return(1);
275  }
276  }
277  count++;
278 
279  /* Write own data and free buffer */
280  bytes = fwrite(buffer,sizeof(double),(sizex+sizeu),out);
281  if (bytes != (sizex+sizeu)) {
282  fprintf(stderr,"Error in WriteArrayParallel(): Failed to write data to file %s.\n",filename);
283  return(1);
284  }
285  free(buffer);
286 
287  /* receive and write the data for the other processors in this IO rank's group */
288  for (proc=mpi->GroupStartRank+1; proc<mpi->GroupEndRank; proc++) {
289  /* get the local domain limits for process proc */
290  IERR MPILocalDomainLimits(ndims,proc,mpi,dim_global,is,ie);
291  /* calculate the size of its local data and allocate write buffer */
292  write_size_x = 0; for (d=0; d<ndims; d++) write_size_x += (ie[d]-is[d]);
293  write_size_u = nvars; for (d=0; d<ndims; d++) write_size_u *= (ie[d]-is[d]);
294  write_total_size = write_size_x + write_size_u;
295  write_buffer = (double*) calloc (write_total_size, sizeof(double));
296  /* receive the data */
297  MPI_Request req = MPI_REQUEST_NULL;
298  MPI_Irecv(write_buffer,write_total_size,MPI_DOUBLE,proc,1449,mpi->world,&req);
299  MPI_Wait(&req,MPI_STATUS_IGNORE);
300  /* write the data */
301  bytes = fwrite(write_buffer,sizeof(double),write_total_size,out);
302  if (bytes != write_total_size) {
303  fprintf(stderr,"Error in WriteArrayParallel(): Failed to write data to file %s.\n",filename);
304  return(1);
305  }
306  free(write_buffer);
307  }
308 
309  /* close the file */
310  fclose(out);
311 
312  } else {
313 
314  /* all other processes, just send the data to the rank responsible for file I/O */
315  MPI_Request req = MPI_REQUEST_NULL;
316  MPI_Isend(buffer,(sizex+sizeu),MPI_DOUBLE,mpi->IORank,1449,mpi->world,&req);
317  MPI_Wait(&req,MPI_STATUS_IGNORE);
318  free(buffer);
319 
320  }
321 
322  return(0);
323 }
References IERR (basic.h:16), CHECKERR (basic.h:18), _DECLARE_IERR_ (basic.h:17), _MAX_STRING_SIZE_ (basic.h:14), _ArrayCopy1D_, ArrayCopynD(), MPIGetFilename(), MPILocalDomainLimits(), HyPar::restart_iter (hypar.h:58), HyPar::op_overwrite (hypar.h:191), HyPar::solnfilename_extn (hypar.h:201), MPIVariables::world, MPIVariables::IOWorld, the HyPar structure (hypar.h:23), and the MPIVariables structure.

◆ WriteArray()

int WriteArray ( int  ndims,
int  nvars,
int *  dim_global,
int *  dim_local,
int  ghosts,
double *  x,
double *  u,
void *  s,
void *  m,
char *  fname_root 
)

Write out a vector field, stored as an array, to a file: a wrapper function that calls the appropriate function depending on the output mode (HyPar::output_mode). The output file format is determined by HyPar::op_file_format.
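
As a usage illustration, a call from solver code might look like the sketch below. The structure members (HyPar::ndims, HyPar::nvars, HyPar::dim_global, HyPar::dim_local, HyPar::ghosts, HyPar::x, HyPar::u) are documented in hypar.h; the wrapper function name write_current_solution, the filename root "op", and the explicit prototype are assumptions made for this example.

    #include <stdio.h>
    #include <hypar.h>
    #include <mpivars.h>

    /* Prototype repeated here for illustration; see WriteArray.c. */
    extern int WriteArray(int, int, int*, int*, int, double*, double*,
                          void*, void*, char*);

    /* Hypothetical call site: write the solution currently held in the
       solver object; solver and mpi are assumed to be initialized. */
    int write_current_solution(HyPar *solver, MPIVariables *mpi)
    {
      int ierr = WriteArray( solver->ndims, solver->nvars,
                             solver->dim_global, solver->dim_local,
                             solver->ghosts, solver->x, solver->u,
                             (void*) solver, (void*) mpi, "op" );
      if (ierr) fprintf(stderr, "WriteArray() returned an error.\n");
      return ierr;
    }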

Parameters
ndims        Number of spatial dimensions
nvars        Number of variables per grid point
dim_global   Integer array of size ndims with global grid size in each dimension
dim_local    Integer array of size ndims with local grid size in each dimension
ghosts       Number of ghost points
x            Array of spatial coordinates (i.e. the grid)
u            Vector field to write
s            Solver object of type HyPar
m            MPI object of type MPIVariables
fname_root   Filename root (extension is added automatically). For unsteady output, a numerical index is added that is the same as for the solution output files.

Definition at line 27 of file WriteArray.c.

40 {
41  HyPar *solver = (HyPar*) s;
42  MPIVariables *mpi = (MPIVariables*)m;
43  _DECLARE_IERR_;
44 
45  /* if WriteOutput() is NULL, then return */
46  if (!solver->WriteOutput) return(0);
47 
48 #ifndef serial
49  if (!strcmp(solver->output_mode,"serial")) {
50 #endif
51  IERR WriteArraySerial(ndims,nvars,dim_global,dim_local,ghosts,x,u,
52  solver,mpi,fname_root); CHECKERR(ierr);
53 #ifndef serial
54  } else {
55  IERR WriteArrayParallel(ndims,nvars,dim_global,dim_local,ghosts,x,u,
56  solver,mpi,fname_root); CHECKERR(ierr);
57  }
58 #endif
59 
60  return(0);
61 }
References IERR (basic.h:16), CHECKERR (basic.h:18), _DECLARE_IERR_ (basic.h:17), HyPar::output_mode (hypar.h:183), HyPar::WriteOutput (hypar.h:211), WriteArraySerial() (WriteArray.c:74), WriteArrayParallel() (WriteArray.c:194), the HyPar structure (hypar.h:23), and the MPIVariables structure.
Definition: WriteArray.c:74