HyPar 1.0
Finite-Difference Hyperbolic-Parabolic PDE Solver on Cartesian Grids
SparseGridsMiscFuncs.cpp
/*! @file SparseGridsMiscFuncs.cpp
    @brief Miscellaneous functions for the sparse grids simulation
*/

#include <cfloat>
#include <limits>
#include <math.h>
#include <sparse_grids_simulation.h>
#include <arrayfunctions.h>
#include <mathfunctions.h>
#include <io_cpp.h>
#include <std_vec_ops.h>

#ifdef with_python
#include <Python.h>
#endif

extern "C" void IncrementFilenameIndex(char*,int);
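
/* Combination technique: the solution on the full grid is approximated by a
 * linear combination of solutions on coarser, anisotropic grids,
 *
 *   u^(fg) ~ sum_{s=1..d} (-1)^(s-1) * C(d-1,s-1) * sum_l u^(l),
 *
 * where the inner sum runs over all grids l whose base-2 log sizes add up to
 * (m_n_fg + (d-1)*(m_imin-1)) + d - s, and each grid has 2^(l_i) points along
 * dimension i. ComputeSGDimsAndCoeffs() below assembles these
 * (coefficient, grid size) pairs into m_combination: cfunc() supplies the
 * binomial factor and c1 the alternating sign. */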
int SparseGridsSimulation::ComputeSGDimsAndCoeffs()
{
  m_n_fg = log2(m_sim_fg->solver.dim_global[0]);
  int d = m_ndims;

  double c1 = 1;
  m_combination.clear();
  for (int s = 1; s <= d; s++) {

    double coeff = c1 * cfunc(d-1,s-1);

    std::vector<GridDimensions> dims(0);
    GetCTGridSizes((m_n_fg+(m_ndims-1)*(m_imin-1))+d-s, dims);
    if (dims.size() == 0) {
      fprintf(stderr, "Error in SparseGridsSimulation::ComputeSGDimsAndCoeffs()\n");
      fprintf(stderr, "  GetCTGridSizes() returned an empty vector!\n");
      m_combination.clear();
      return 1;
    }
    for (int i=0; i<dims.size(); i++) {
      m_combination.push_back(std::pair<double,GridDimensions>(coeff,dims[i]));
    }

    c1 *= -1;

  }

  return 0;
}

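/* GetCTGridSizes() enumerates every exponent vector (l_1,...,l_d) with
 * m_imin <= l_i and l_1 + ... + l_d == a_N, and records the corresponding
 * grid size (2^(l_1),...,2^(l_d)); the bounded index space is walked with
 * _ArrayIncrementIndexWithLBound_(). */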
void SparseGridsSimulation::GetCTGridSizes( const int a_N,
                                            std::vector<GridDimensions>& a_dims
                                          )
{
  a_dims.clear();

  int index[m_ndims], ubounds[m_ndims], lbounds[m_ndims];
  _ArraySetValue_(index, m_ndims, m_imin); /* start at the lower bound */
  _ArraySetValue_(ubounds, m_ndims, a_N);
  _ArraySetValue_(lbounds, m_ndims, m_imin);

  int done = 0;
  while (!done) {
    int sum; _ArraySum1D_(index, sum, m_ndims);
    if (sum == a_N) {
      std::vector<int> tmp(m_ndims);
      for (int d=0; d<m_ndims; d++) {
        raiseto_int( tmp[d], 2, index[d] );
      }
      a_dims.push_back(tmp);
    }
    _ArrayIncrementIndexWithLBound_(m_ndims,ubounds,lbounds,index,done);
  }

  return;
}

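/* ComputeProcessorDistribution() decides how to split the available MPI ranks
 * among the dimensions of one grid: it enumerates every factorization of
 * m_nproc that keeps at least _MIN_GRID_PTS_PER_PROC_ grid points per rank
 * along each dimension, then picks the candidate whose normalized rank vector
 * is closest (in the 2-norm) to the normalized grid-dimension vector, so that
 * the domain decomposition roughly follows the grid's aspect ratio. */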
int SparseGridsSimulation::ComputeProcessorDistribution( ProcDistribution& a_iprocs,
                                                         const GridDimensions& a_dim )
{
  a_iprocs.resize(m_ndims);
  /* get the normal vector for the grid dimensions */
  std::vector<double> dvec;
  StdVecOps::createNormalVector(dvec, a_dim);

  /* calculate the maximum number of MPI ranks in each dimension */
  int max_procs[m_ndims], min_procs[m_ndims];
  for (int i=0; i<m_ndims; i++) {
    max_procs[i] = a_dim[i]/_MIN_GRID_PTS_PER_PROC_;
    min_procs[i] = 1;
  }
  int max_nproc; _ArrayProduct1D_(max_procs, m_ndims, max_nproc);
  if (max_nproc < m_nproc) {
    fprintf(stderr, "Error in SparseGridsSimulation::ComputeProcessorDistribution() - rank %d\n", m_rank);
    fprintf(stderr, "  Number of MPI ranks is greater than the maximum number of MPI ranks that can be used.\n");
    fprintf(stderr, "  Please re-run with %d MPI ranks.\n", max_nproc);
    return 1;
  }

  /* find all the processor distributions that are okay, i.e., their product
   * is the total number of MPI ranks #SparseGridsSimulation::m_nproc */
  std::vector<ProcDistribution> iproc_candidates(0);
  int iproc[m_ndims], ubound[m_ndims], done = 0;
  for (int d=0; d<m_ndims; d++) ubound[d] = max_procs[d]+1;
  _ArraySetValue_(iproc, m_ndims, 1);
  while (!done) {
    int prod; _ArrayProduct1D_(iproc, m_ndims, prod);
    if (prod == m_nproc) {
      ProcDistribution iproc_vec(m_ndims);
      for (int d = 0; d < m_ndims; d++) iproc_vec[d] = iproc[d];
      iproc_candidates.push_back(iproc_vec);
    }
    _ArrayIncrementIndexWithLBound_(m_ndims,ubound,min_procs,iproc,done);
  }
  if (iproc_candidates.size() == 0) {
    fprintf(stderr, "Error in SparseGridsSimulation::ComputeProcessorDistribution() - rank %d\n", m_rank);
    fprintf(stderr, "  Unable to find candidate iprocs!\n");
    return 1;
  }

  /* find the candidate that is closest to the normalized dim vector */
  double min_norm = DBL_MAX;
  for (int i = 0; i<iproc_candidates.size(); i++) {
    std::vector<double> pvec;
    StdVecOps::createNormalVector(pvec, iproc_candidates[i]);
    double norm = StdVecOps::compute2Norm(pvec, dvec);
    if (norm < min_norm) {
      min_norm = norm;
      a_iprocs = iproc_candidates[i];
    }
  }

  return 0;
}

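/* A "barebones" simulation object is a lightweight container for the full-grid
 * solution: it carries only the arrays and MPI data needed to store and
 * exchange that solution, while the numerical-method pointers are left unset
 * (see InitializeSolversBarebones() below). CleanupBarebones() frees the
 * arrays allocated for such an object. */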
int SparseGridsSimulation::CleanupBarebones( SimulationObject* sim )
{
  if (sim->is_barebones == 0) {
    fprintf(stderr, "Error in SparseGridsSimulation::CleanupBarebones()\n");
    fprintf(stderr, "  Simulation object is not a barebones one.\n");
    return 1;
  }

  HyPar* solver = &(sim->solver);
  MPIVariables* mpi = &(sim->mpi);

  /* Free the communicators created */
  IERR MPIFreeCommunicators(solver->ndims,mpi); CHECKERR(ierr);

  /* These variables are allocated in Initialize.c */
  free(solver->dim_global);
  free(solver->dim_global_ex);
  free(solver->dim_local);
  free(solver->index);
  free(solver->isPeriodic);
  free(solver->u);
  free(solver->x);
  free(solver->dxinv);
  free(mpi->iproc);
  free(mpi->ip);
  free(mpi->is);
  free(mpi->ie);
  free(mpi->bcperiodic);
  free(mpi->sendbuf);
  free(mpi->recvbuf);
  free(solver->VolumeIntegral);
  free(solver->VolumeIntegralInitial);
  free(solver->TotalBoundaryIntegral);
  free(solver->ConservationError);
  free(solver->stride_with_ghosts);
  free(solver->stride_without_ghosts);
  if (solver->filename_index) free(solver->filename_index);

  return(0);
}

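/* InitializeBarebones() is a minimal version of the full-grid setup: it
 * performs the MPI domain decomposition (rank coordinates, local sizes, local
 * index limits, sub-communicators, and I/O groups), computes the array
 * strides, and allocates the solution, grid, and MPI exchange buffers. */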
int SparseGridsSimulation::InitializeBarebones( SimulationObject* simobj )
{
  /* Set this to "true" since this is a barebones initialization */
  simobj->is_barebones = 1;

  HyPar* solver = &(simobj->solver);
  MPIVariables* mpi = &(simobj->mpi);

  /* allocations */
  mpi->ip = (int*) calloc (m_ndims,sizeof(int));
  mpi->is = (int*) calloc (m_ndims,sizeof(int));
  mpi->ie = (int*) calloc (m_ndims,sizeof(int));
  mpi->bcperiodic = (int*) calloc (m_ndims,sizeof(int));
  solver->dim_local = (int*) calloc (m_ndims,sizeof(int));
  solver->isPeriodic = (int*) calloc (m_ndims,sizeof(int));

#ifndef serial
  /* Domain partitioning */
  int total_proc = 1;
  for (int i=0; i<m_ndims; i++) total_proc *= mpi->iproc[i];

  /* calculate ndims-D rank of each process (ip[]) from rank in MPI_COMM_WORLD */
  IERR MPIRanknD( m_ndims,
                  m_rank,
                  mpi->iproc,
                  mpi->ip ); CHECKERR(ierr);

  /* calculate local domain sizes along each dimension */
  for (int i=0; i<m_ndims; i++) {
    solver->dim_local[i] = MPIPartition1D( solver->dim_global[i],
                                           mpi->iproc[i],
                                           mpi->ip[i] );
  }

  /* calculate local domain limits in terms of global domain */
  IERR MPILocalDomainLimits( m_ndims,
                             m_rank,
                             &(simobj->mpi),
                             solver->dim_global,
                             mpi->is,
                             mpi->ie );
  CHECKERR(ierr);

  /* create sub-communicators for parallel computations
   * along grid lines in each dimension */
  IERR MPICreateCommunicators(m_ndims,&(simobj->mpi)); CHECKERR(ierr);

  /* initialize periodic BC flags to zero */
  for (int i=0; i<solver->ndims; i++) mpi->bcperiodic[i] = 0;

  /* create communication groups */
  IERR MPICreateIOGroups(&(simobj->mpi)); CHECKERR(ierr);

#else

  for (int i=0; i<m_ndims; i++) {
    mpi->ip[i] = 0;
    solver->dim_local[i] = solver->dim_global[i];
    mpi->iproc[i] = 1;
    mpi->is[i] = 0;
    mpi->ie[i] = solver->dim_local[i];
    mpi->bcperiodic[i] = 0;
  }

#endif

  solver->npoints_global
    = solver->npoints_local
    = solver->npoints_local_wghosts
    = 1;
  for (int i=0; i<m_ndims; i++) {
    solver->npoints_global *= solver->dim_global[i];
    solver->npoints_local *= solver->dim_local [i];
    solver->npoints_local_wghosts *= (solver->dim_local[i]+2*solver->ghosts);
  }

  /* Allocations */
  if (!m_rank) printf("Allocating data arrays for full grid.\n");
  solver->index = (int*) calloc (m_ndims,sizeof(int));
  solver->stride_with_ghosts = (int*) calloc (solver->ndims,sizeof(int));
  solver->stride_without_ghosts = (int*) calloc (solver->ndims,sizeof(int));
  int accu1 = 1, accu2 = 1;
  for (int i=0; i<solver->ndims; i++) {
    solver->stride_with_ghosts[i] = accu1;
    solver->stride_without_ghosts[i] = accu2;
    accu1 *= (solver->dim_local[i]+2*solver->ghosts);
    accu2 *= solver->dim_local[i];
  }

  int size;
  /* state variables (size = product of local sizes including ghost points) */
  size = 1;
  for (int i=0; i<m_ndims; i++) size *= (solver->dim_local[i]+2*solver->ghosts);
  solver->u = (double*) calloc (solver->nvars*size,sizeof(double));

  /* grid (x and dxinv are stored dimension-by-dimension, so size is a sum) */
  size = 0;
  for (int i=0; i<m_ndims; i++) size += (solver->dim_local[i]+2*solver->ghosts);
  solver->x = (double*) calloc (size,sizeof(double));
  solver->dxinv = (double*) calloc (size,sizeof(double));

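  /* MPI exchange buffer size: for each dimension d, one ghost-region face has
   * (ghosts x product of the local sizes along the other dimensions) points;
   * maxbuf is the largest such face times nvars, and the send/receive buffers
   * of size 2*ndims*maxbuf cover both faces in every dimension. */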
  /* allocate MPI send/receive buffer arrays */
  int bufdim[solver->ndims], maxbuf = 0;
  for (int d = 0; d < solver->ndims; d++) {
    bufdim[d] = 1;
    for (int i = 0; i < solver->ndims; i++) {
      if (i == d) bufdim[d] *= solver->ghosts;
      else        bufdim[d] *= solver->dim_local[i];
    }
    if (bufdim[d] > maxbuf) maxbuf = bufdim[d];
  }
  maxbuf *= solver->nvars;
  mpi->maxbuf = maxbuf;
  mpi->sendbuf = (double*) calloc (2*solver->ndims*maxbuf,sizeof(double));
  mpi->recvbuf = (double*) calloc (2*solver->ndims*maxbuf,sizeof(double));

  solver->VolumeIntegral = (double*) calloc (solver->nvars,sizeof(double));
  solver->VolumeIntegralInitial = (double*) calloc (solver->nvars,sizeof(double));
  solver->TotalBoundaryIntegral = (double*) calloc (solver->nvars,sizeof(double));
  solver->ConservationError = (double*) calloc (solver->nvars,sizeof(double));

  for (int i=0; i<solver->nvars; i++) solver->ConservationError[i] = -1;

  return(0);
}

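/* InitializeSolversBarebones() is the solver-setup counterpart for a barebones
 * object: all numerical-method and time-integration function pointers are set
 * to NULL, and only the solution output function (and, if enabled, the Python
 * plotting hooks) is configured. */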
int SparseGridsSimulation::InitializeSolversBarebones( SimulationObject* sim )
{
  if (sim->is_barebones != 1) {
    fprintf(stderr, "Error in SparseGridsSimulation::InitializeSolversBarebones() - \n");
    fprintf(stderr, "  simulation object is not barebones type.\n");
  }

  int ns = 0;

  HyPar *solver = &(sim->solver);
  MPIVariables *mpi = &(sim->mpi);

  solver->ParabolicFunction = NULL;
  solver->SecondDerivativePar = NULL;
  solver->FirstDerivativePar = NULL;
  solver->InterpolateInterfacesPar = NULL;

  solver->interp = NULL;
  solver->compact = NULL;
  solver->lusolver = NULL;
  solver->SetInterpLimiterVar = NULL;
  solver->flag_nonlinearinterp = 0;
  solver->time_integrator = NULL;
  solver->msti = NULL;

  /* Solution output function */
  solver->WriteOutput = NULL; /* default - no output */
  solver->filename_index = NULL;
  if (!strcmp(solver->output_mode,"serial")) {
    solver->index_length = 5;
    solver->filename_index = (char*) calloc (solver->index_length+1,sizeof(char));
    int i; for (i=0; i<solver->index_length; i++) solver->filename_index[i] = '0';
    solver->filename_index[solver->index_length] = (char) 0;
    if (!strcmp(solver->op_file_format,"text")) {
      solver->WriteOutput = WriteText;
      strcpy(solver->solnfilename_extn,".dat");
    } else if (!strcmp(solver->op_file_format,"tecplot2d")) {
      solver->WriteOutput = WriteTecplot2D;
      strcpy(solver->solnfilename_extn,".dat");
    } else if (!strcmp(solver->op_file_format,"tecplot3d")) {
      solver->WriteOutput = WriteTecplot3D;
      strcpy(solver->solnfilename_extn,".dat");
    } else if ((!strcmp(solver->op_file_format,"binary")) || (!strcmp(solver->op_file_format,"bin"))) {
      solver->WriteOutput = WriteBinary;
      strcpy(solver->solnfilename_extn,".bin");
    } else if (!strcmp(solver->op_file_format,"none")) {
      solver->WriteOutput = NULL;
    } else {
      fprintf(stderr,"Error (domain %d): %s is not a supported file format.\n",
              ns, solver->op_file_format);
      return(1);
    }
    if ((!strcmp(solver->op_overwrite,"no")) && solver->restart_iter) {
      /* if it's a restart run, fast-forward the filename */
      int t;
      for (t=0; t<solver->restart_iter; t++)
        if ((t+1)%solver->file_op_iter == 0) IncrementFilenameIndex(solver->filename_index,solver->index_length);
    }
  } else if (!strcmp(solver->output_mode,"parallel")) {
    if (!strcmp(solver->op_file_format,"none")) solver->WriteOutput = NULL;
    else {
      /* only binary file writing supported in parallel mode */
      /* use post-processing scripts to convert */
      solver->WriteOutput = WriteBinary;
      strcpy(solver->solnfilename_extn,".bin");
    }
  } else {

    fprintf(stderr,"Error (domain %d): %s is not a supported output mode.\n",
            ns, solver->output_mode);
    fprintf(stderr,"Should be \"serial\" or \"parallel\".\n");
    return(1);

  }

  /* Solution plotting function */
  strcpy(solver->plotfilename_extn,".png");
#ifdef with_python
  solver->py_plt_func = NULL;
  solver->py_plt_func_args = NULL;
  {
    char python_plotting_fname[_MAX_STRING_SIZE_] = "plotSolution";
    PyObject* py_plot_name = PyUnicode_DecodeFSDefault(python_plotting_fname);
    PyObject* py_plot_module = PyImport_Import(py_plot_name);
    Py_DECREF(py_plot_name);
    if (py_plot_module) {
      solver->py_plt_func = PyObject_GetAttrString(py_plot_module, "plotSolution");
      if (!solver->py_plt_func) {
        if (!mpi->rank) {
          printf("Unable to load plotSolution function from Python module.\n");
        }
      } else {
        if (!mpi->rank) {
          printf("Loaded Python module for plotting.\n");
          printf("Loaded plotSolution function from Python module.\n");
        }
      }
    } else {
      if (!mpi->rank) {
        printf("Unable to load Python module for plotting.\n");
      }
    }
  }
#endif

  return 0;
}