HyPar  1.0
Finite-Difference Hyperbolic-Parabolic PDE Solver on Cartesian Grids
simulation.h File Reference

Base class for a simulation and declarations of C functions. More...

#include <simulation_object.h>
#include <mpivars_cpp.h>
#include <petscinterface.h>

Go to the source code of this file.

Data Structures

class  Simulation
 Base class for a simulation. More...
 

Functions

int ReadInputs (void *, int, int)
 
int WriteInputs (void *, int, int)
 
int Initialize (void *, int)
 
int Initialize_GPU (void *, int)
 
int InitialSolution (void *, int)
 
int InitializeBoundaries (void *, int)
 
int InitializeImmersedBoundaries (void *, int)
 
int InitializePhysics (void *, int)
 
int InitializePhysicsData (void *, int, int, int *)
 
int InitializeSolvers (void *, int)
 
int Cleanup (void *, int)
 
void SimWriteErrors (void *, int, int, double, double)
 
int SolvePETSc (void *, int, int, int)
 Integrate in time with PETSc. More...
 
int Solve (void *, int, int, int)
 

Detailed Description

Base class for a simulation and declarations of C functions.

Author
Debojyoti Ghosh

Definition in file simulation.h.

Function Documentation

◆ ReadInputs()

int ReadInputs ( void *  s,
int  nsims,
int  rank 
)

Read the input parameters

Read the simulation inputs from the file solver.inp. Rank 0 reads in the inputs and broadcasts them to all the processors.

The format of solver.inp is as follows:

begin
    <keyword>   <value>
    <keyword>   <value>
    <keyword>   <value>
    ...
    <keyword>   <value>
end

where the recognized keywords, their types, and their default values are:

Keyword name         Type         Variable                       Default value
-------------------  -----------  -----------------------------  ------------------------------------
ndims                int          HyPar::ndims                   1
nvars                int          HyPar::nvars                   1
size                 int[ndims]   HyPar::dim_global              must be specified
iproc                int[ndims]   MPIVariables::iproc            must be specified (see notes below)
ghost                int          HyPar::ghosts                  1
n_iter               int          HyPar::n_iter                  0
restart_iter         int          HyPar::restart_iter            0
time_scheme          char[]       HyPar::time_scheme             euler
time_scheme_type     char[]       HyPar::time_scheme_type        none
hyp_space_scheme     char[]       HyPar::spatial_scheme_hyp      1
hyp_flux_split       char[]       HyPar::SplitHyperbolicFlux     no
hyp_interp_type      char[]       HyPar::interp_type             characteristic
par_space_type       char[]       HyPar::spatial_type_par        nonconservative-1stage
par_space_scheme     char[]       HyPar::spatial_scheme_par      2
dt                   double       HyPar::dt                      0.0
conservation_check   char[]       HyPar::ConservationCheck       no
screen_op_iter       int          HyPar::screen_op_iter          1
file_op_iter         int          HyPar::file_op_iter            1000
op_file_format       char[]       HyPar::op_file_format          text
ip_file_type         char[]       HyPar::ip_file_type            ascii
input_mode           char[]       HyPar::input_mode              serial
output_mode          char[]       HyPar::output_mode             serial
op_overwrite         char[]       HyPar::op_overwrite            no
plot_solution        char[]       HyPar::plot_solution           no
model                char[]       HyPar::model                   must be specified
immersed_body        char[]       HyPar::ib_filename             "none"
size_exact           int[ndims]   HyPar::dim_global_ex           HyPar::dim_global
use_gpu              char[]       HyPar::use_gpu                 no
gpu_device_no        int          HyPar::gpu_device_no           -1

Notes:

  • "ndims" must be specified before "size".
  • The input "iproc" is ignored when running a sparse grids simulation.
  • If "input_mode" or "output_mode" is set to "parallel" or "mpi-io", the number of I/O ranks must be specified immediately after it as an integer. For example:

    begin
        ...
        input_mode  parallel 4
        ...
    end
    

    This means that 4 MPI ranks will participate in file I/O (assuming the total number of MPI ranks is greater than 4); see ReadArrayParallel(), WriteArrayParallel(), and ReadArrayMPI_IO().

    • The number of I/O ranks specified for "input_mode" and "output_mode" must be the same; otherwise, the value specified last is used.
    • The number of I/O ranks must evenly divide the total number of MPI ranks; otherwise, the code will use only 1 I/O rank.
  • If a keyword is not present, its default value is used, except for those whose default value says "must be specified". Thus, keywords that are not required for a particular simulation may be left out of the solver.inp input file. For example,
    • an Euler1D simulation does not need "par_space_type" or "par_space_scheme" because it does not have a parabolic term.
    • unless a conservation check is required, "conservation_check" can be left out and the code will not check for conservation.
    • "immersed_body" need not be specified if there are no immersed bodies present. NOTE: However, if it is specified, and a file of that filename does not exist, it will result in an error.
Parameters
s      Array of simulation objects of type SimulationObject of size nsims
nsims  Number of simulation objects
rank   MPI rank of this process

Definition at line 93 of file ReadInputs.c.

98 {
99  SimulationObject* sim = (SimulationObject*) s;
100  int n, ferr = 0;
101 
102  if (sim == NULL) {
103  printf("Error: simulation object array is NULL!\n");
104  printf("Please consider killing this run.\n");
105  return(1);
106  }
107 
108  if (!rank) {
109 
110  /* set some default values for optional inputs */
111  for (n = 0; n < nsims; n++) {
112  sim[n].solver.ndims = 1;
113  sim[n].solver.nvars = 1;
114  sim[n].solver.ghosts = 1;
115  sim[n].solver.dim_global = NULL;
116  sim[n].solver.dim_local = NULL;
117  sim[n].solver.dim_global_ex = NULL;
118  sim[n].mpi.iproc = NULL;
119  sim[n].mpi.N_IORanks = 1;
120  sim[n].solver.dt = 0.0;
121  sim[n].solver.n_iter = 0;
122  sim[n].solver.restart_iter = 0;
123  sim[n].solver.screen_op_iter = 1;
124  sim[n].solver.file_op_iter = 1000;
125  sim[n].solver.write_residual = 0;
126  sim[n].solver.flag_ib = 0;
127 #if defined(HAVE_CUDA)
128  sim[n].solver.use_gpu = 0;
129  sim[n].solver.gpu_device_no = -1;
130 #endif
131  strcpy(sim[n].solver.time_scheme ,"euler" );
132  strcpy(sim[n].solver.time_scheme_type ," " );
133  strcpy(sim[n].solver.spatial_scheme_hyp ,"1" );
134  strcpy(sim[n].solver.spatial_type_par ,_NC_1STAGE_ );
135  strcpy(sim[n].solver.spatial_scheme_par ,"2" );
136  strcpy(sim[n].solver.interp_type ,"characteristic");
137  strcpy(sim[n].solver.ip_file_type ,"ascii" );
138  strcpy(sim[n].solver.input_mode ,"serial" );
139  strcpy(sim[n].solver.output_mode ,"serial" );
140  strcpy(sim[n].solver.op_file_format ,"text" );
141  strcpy(sim[n].solver.op_overwrite ,"no" );
142  strcpy(sim[n].solver.plot_solution ,"no" );
143  strcpy(sim[n].solver.model ,"none" );
144  strcpy(sim[n].solver.ConservationCheck ,"no" );
145  strcpy(sim[n].solver.SplitHyperbolicFlux,"no" );
146  strcpy(sim[n].solver.ib_filename ,"none" );
147  }
148 
149  /* open the file */
150  FILE *in;
151  printf("Reading solver inputs from file \"solver.inp\".\n");
152  in = fopen("solver.inp","r");
153  if (!in) {
154  fprintf(stderr,"Error: File \"solver.inp\" not found.\n");
155  fprintf(stderr,"Please consider killing this run.\n");
156  return(1);
157  }
158 
159  /* reading solver inputs */
160  char word[_MAX_STRING_SIZE_];
161  ferr = fscanf(in,"%s",word); if (ferr != 1) return(1);
162 
163  if (!strcmp(word, "begin")){
164 
165  while (strcmp(word, "end")) {
166 
167  ferr = fscanf(in,"%s",word); if (ferr != 1) return(1);
168 
169  if (!strcmp(word, "ndims")) {
170 
171  ferr = fscanf(in,"%d",&(sim[0].solver.ndims)); if (ferr != 1) return(1);
172  sim[0].solver.dim_global = (int*) calloc (sim[0].solver.ndims,sizeof(int));
173  sim[0].mpi.iproc = (int*) calloc (sim[0].solver.ndims,sizeof(int));
174  sim[0].solver.dim_global_ex = (int*) calloc (sim[0].solver.ndims,sizeof(int));
175 
176  int n;
177  for (n = 1; n < nsims; n++) {
178  sim[n].solver.ndims = sim[0].solver.ndims;
179  sim[n].solver.dim_global = (int*) calloc (sim[n].solver.ndims,sizeof(int));
180  sim[n].mpi.iproc = (int*) calloc (sim[n].solver.ndims,sizeof(int));
181  sim[n].solver.dim_global_ex = (int*) calloc (sim[n].solver.ndims,sizeof(int));
182  }
183 
184  } else if (!strcmp(word, "nvars")) {
185 
186  ferr = fscanf(in,"%d",&(sim[0].solver.nvars));
187  for (int n = 1; n < nsims; n++) sim[n].solver.nvars = sim[0].solver.nvars;
188 
189  } else if (!strcmp(word, "size")) {
190 
191  for (int n = 0; n < nsims; n++) {
192  if (!sim[n].solver.dim_global) {
193  fprintf(stderr,"Error in ReadInputs(): dim_global not allocated for n=%d.\n", n);
194  fprintf(stderr,"Please specify ndims before dimensions.\n" );
195  return(1);
196  } else {
197  for (int i=0; i<sim[n].solver.ndims; i++) {
198  ferr = fscanf(in,"%d",&(sim[n].solver.dim_global[i]));
199  if (ferr != 1) {
200  fprintf(stderr,"Error in ReadInputs() while reading grid sizes for domain %d.\n", n);
201  return(1);
202  }
203  sim[n].solver.dim_global_ex[i] = sim[n].solver.dim_global[i];
204  }
205  }
206  }
207 
208  } else if (!strcmp(word, "size_exact")) {
209 
210  for (int n = 0; n < nsims; n++) {
211  if (!sim[n].solver.dim_global_ex) {
212  fprintf(stderr,"Error in ReadInputs(): dim_global_ex not allocated for n=%d.\n", n);
213  fprintf(stderr,"Please specify ndims before dimensions.\n" );
214  return(1);
215  } else {
216  for (int i=0; i<sim[n].solver.ndims; i++) {
217  ferr = fscanf(in,"%d",&(sim[n].solver.dim_global_ex[i]));
218  if (ferr != 1) {
219  fprintf(stderr,"Error in ReadInputs() while reading exact solution grid sizes for domain %d.\n", n);
220  return(1);
221  }
222  }
223  }
224  }
225 
226  } else if (!strcmp(word, "iproc")) {
227 
228  int n;
229  for (n = 0; n < nsims; n++) {
230  if (!sim[n].mpi.iproc) {
231  fprintf(stderr,"Error in ReadInputs(): iproc not allocated for n=%d.\n", n);
232  fprintf(stderr,"Please specify ndims before iproc.\n" );
233  return(1);
234  } else {
235  int i;
236  for (i=0; i<sim[n].solver.ndims; i++) {
237  ferr = fscanf(in,"%d",&(sim[n].mpi.iproc[i]));
238  if (ferr != 1) {
239  fprintf(stderr,"Error in ReadInputs() while reading iproc for domain %d.\n", n);
240  return(1);
241  }
242  }
243  }
244  }
245 
246  } else if (!strcmp(word, "ghost")) {
247 
248  ferr = fscanf(in,"%d",&(sim[0].solver.ghosts));
249 
250  int n;
251  for (n = 1; n < nsims; n++) sim[n].solver.ghosts = sim[0].solver.ghosts;
252 
253  } else if (!strcmp(word, "n_iter")) {
254 
255  ferr = fscanf(in,"%d",&(sim[0].solver.n_iter));
256 
257  int n;
258  for (n = 1; n < nsims; n++) sim[n].solver.n_iter = sim[0].solver.n_iter;
259 
260  } else if (!strcmp(word, "restart_iter")) {
261 
262  ferr = fscanf(in,"%d",&(sim[0].solver.restart_iter));
263 
264  int n;
265  for (n = 1; n < nsims; n++) sim[n].solver.restart_iter = sim[0].solver.restart_iter;
266 
267  } else if (!strcmp(word, "time_scheme")) {
268 
269  ferr = fscanf(in,"%s",sim[0].solver.time_scheme);
270 
271  int n;
272  for (n = 1; n < nsims; n++) strcpy(sim[n].solver.time_scheme, sim[0].solver.time_scheme);
273 
274  } else if (!strcmp(word, "time_scheme_type" )) {
275 
276  ferr = fscanf(in,"%s",sim[0].solver.time_scheme_type);
277 
278  int n;
279  for (n = 1; n < nsims; n++) strcpy(sim[n].solver.time_scheme_type, sim[0].solver.time_scheme_type);
280 
281  } else if (!strcmp(word, "hyp_space_scheme")) {
282 
283  ferr = fscanf(in,"%s",sim[0].solver.spatial_scheme_hyp);
284 
285  int n;
286  for (n = 1; n < nsims; n++) strcpy(sim[n].solver.spatial_scheme_hyp, sim[0].solver.spatial_scheme_hyp);
287 
288  } else if (!strcmp(word, "hyp_flux_split")) {
289 
290  ferr = fscanf(in,"%s",sim[0].solver.SplitHyperbolicFlux);
291 
292  int n;
293  for (n = 1; n < nsims; n++) strcpy(sim[n].solver.SplitHyperbolicFlux, sim[0].solver.SplitHyperbolicFlux);
294 
295  } else if (!strcmp(word, "hyp_interp_type")) {
296 
297  ferr = fscanf(in,"%s",sim[0].solver.interp_type);
298 
299  int n;
300  for (n = 1; n < nsims; n++) strcpy(sim[n].solver.interp_type, sim[0].solver.interp_type);
301 
302  } else if (!strcmp(word, "par_space_type")) {
303 
304  ferr = fscanf(in,"%s",sim[0].solver.spatial_type_par);
305 
306  int n;
307  for (n = 1; n < nsims; n++) strcpy(sim[n].solver.spatial_type_par, sim[0].solver.spatial_type_par);
308 
309  } else if (!strcmp(word, "par_space_scheme")) {
310 
311  ferr = fscanf(in,"%s",sim[0].solver.spatial_scheme_par);
312 
313  int n;
314  for (n = 1; n < nsims; n++) strcpy(sim[n].solver.spatial_scheme_par, sim[0].solver.spatial_scheme_par);
315 
316  } else if (!strcmp(word, "dt")) {
317 
318  ferr = fscanf(in,"%lf",&(sim[0].solver.dt));
319 
320  int n;
321  for (n = 1; n < nsims; n++) sim[n].solver.dt = sim[0].solver.dt;
322 
323  } else if (!strcmp(word, "conservation_check" )) {
324 
325  ferr = fscanf(in,"%s",sim[0].solver.ConservationCheck);
326 
327  int n;
328  for (n = 1; n < nsims; n++) strcpy(sim[n].solver.ConservationCheck, sim[0].solver.ConservationCheck);
329 
330  } else if (!strcmp(word, "screen_op_iter")) {
331 
332  ferr = fscanf(in,"%d",&(sim[0].solver.screen_op_iter));
333 
334  int n;
335  for (n = 1; n < nsims; n++) sim[n].solver.screen_op_iter = sim[0].solver.screen_op_iter;
336 
337  } else if (!strcmp(word, "file_op_iter")) {
338 
339  ferr = fscanf(in,"%d",&(sim[0].solver.file_op_iter));
340 
341  int n;
342  for (n = 1; n < nsims; n++) sim[n].solver.file_op_iter = sim[0].solver.file_op_iter;
343 
344  } else if (!strcmp(word, "op_file_format")) {
345 
346  ferr = fscanf(in,"%s",sim[0].solver.op_file_format);
347 
348  int n;
349  for (n = 1; n < nsims; n++) strcpy(sim[n].solver.op_file_format, sim[0].solver.op_file_format);
350 
351  } else if (!strcmp(word, "ip_file_type")) {
352 
353  ferr = fscanf(in,"%s",sim[0].solver.ip_file_type);
354 
355  int n;
356  for (n = 1; n < nsims; n++) strcpy(sim[n].solver.ip_file_type, sim[0].solver.ip_file_type);
357 
358  } else if (!strcmp(word, "input_mode")) {
359 
360  ferr = fscanf(in,"%s",sim[0].solver.input_mode);
361  if (strcmp(sim[0].solver.input_mode,"serial")) ferr = fscanf(in,"%d",&(sim[0].mpi.N_IORanks));
362 
363  int n;
364  for (n = 1; n < nsims; n++) {
365  strcpy(sim[n].solver.input_mode, sim[0].solver.input_mode);
366  if (strcmp(sim[n].solver.input_mode,"serial")) sim[n].mpi.N_IORanks = sim[0].mpi.N_IORanks;
367  }
368 
369  } else if (!strcmp(word, "output_mode")) {
370 
371  ferr = fscanf(in,"%s",sim[0].solver.output_mode);
372  if (strcmp(sim[0].solver.output_mode,"serial")) ferr = fscanf(in,"%d",&(sim[0].mpi.N_IORanks));
373 
374  int n;
375  for (n = 1; n < nsims; n++) {
376  strcpy(sim[n].solver.output_mode, sim[0].solver.output_mode);
377  if (strcmp(sim[n].solver.output_mode,"serial")) sim[n].mpi.N_IORanks = sim[0].mpi.N_IORanks;
378  }
379 
380  } else if (!strcmp(word, "op_overwrite")) {
381 
382  ferr = fscanf(in,"%s",sim[0].solver.op_overwrite);
383 
384  int n;
385  for (n = 1; n < nsims; n++) strcpy(sim[n].solver.op_overwrite, sim[0].solver.op_overwrite);
386 
387  } else if (!strcmp(word, "plot_solution")) {
388 
389  ferr = fscanf(in,"%s",sim[0].solver.plot_solution);
390 
391  int n;
392  for (n = 1; n < nsims; n++) strcpy(sim[n].solver.plot_solution, sim[0].solver.plot_solution);
393 
394  } else if (!strcmp(word, "model")) {
395 
396  ferr = fscanf(in,"%s",sim[0].solver.model);
397 
398  int n;
399  for (n = 1; n < nsims; n++) strcpy(sim[n].solver.model, sim[0].solver.model);
400 
401  } else if (!strcmp(word, "immersed_body")) {
402 
403  ferr = fscanf(in,"%s",sim[0].solver.ib_filename);
404 
405  int n;
406  for (n = 1; n < nsims; n++) strcpy(sim[n].solver.ib_filename, sim[0].solver.ib_filename);
407 
408  }
409 #if defined(HAVE_CUDA)
410  else if (!strcmp(word, "use_gpu")) {
411  ferr = fscanf(in,"%s",word);
412  if (!strcmp(word, "yes") || !strcmp(word, "true")) sim[0].solver.use_gpu = 1;
413 
414  int n;
415  for (n = 1; n < nsims; n++) sim[n].solver.use_gpu = sim[0].solver.use_gpu;
416  } else if (!strcmp(word, "gpu_device_no")) {
417  ferr = fscanf(in,"%d", &sim[0].solver.gpu_device_no);
418 
419  int n;
420  for (n = 1; n < nsims; n++) sim[n].solver.gpu_device_no = sim[0].solver.gpu_device_no;
421  }
422 #endif
423  else if (strcmp(word, "end")) {
424 
425  char useless[_MAX_STRING_SIZE_];
426  ferr = fscanf(in,"%s",useless);
427  printf("Warning: keyword %s in file \"solver.inp\" with value %s not recognized or extraneous. Ignoring.\n",
428  word,useless);
429 
430  }
431  if (ferr != 1) return(1);
432 
433  }
434 
435  } else {
436 
437  fprintf(stderr,"Error: Illegal format in file \"solver.inp\".\n");
438  return(1);
439 
440  }
441 
442  /* close the file */
443  fclose(in);
444 
445  /* some checks */
446  for (n = 0; n < nsims; n++) {
447 
448  if (sim[n].solver.screen_op_iter <= 0) sim[n].solver.screen_op_iter = 1;
449  if (sim[n].solver.file_op_iter <= 0) sim[n].solver.file_op_iter = sim[n].solver.n_iter;
450 
451  if ((sim[n].solver.ndims != 3) && (strcmp(sim[n].solver.ib_filename,"none"))) {
452  printf("Warning: immersed boundaries not implemented for ndims = %d. ",sim[n].solver.ndims);
453  printf("Ignoring input for \"immersed_body\" (%s).\n",sim[n].solver.ib_filename);
454  strcpy(sim[n].solver.ib_filename,"none");
455  }
456  sim[n].solver.flag_ib = strcmp(sim[n].solver.ib_filename,"none");
457 
458  /* restart only supported for binary output files */
459  if ((sim[n].solver.restart_iter != 0) && strcmp(sim[n].solver.op_file_format,"binary")) {
460  if (!sim[n].mpi.rank) fprintf(stderr,"Error in ReadInputs(): Restart is supported only for binary output files.\n");
461  return(1);
462  }
463  }
464  }
465 
466 #ifndef serial
467  for (n = 0; n < nsims; n++) {
468 
469  /* Broadcast the input parameters */
470  MPIBroadcast_integer(&(sim[n].solver.ndims),1,0,&(sim[n].mpi.world));
471  if (sim[n].mpi.rank) {
472  sim[n].solver.dim_global = (int*) calloc (sim[n].solver.ndims,sizeof(int));
473  sim[n].mpi.iproc = (int*) calloc (sim[n].solver.ndims,sizeof(int));
474  sim[n].solver.dim_global_ex = (int*) calloc (sim[n].solver.ndims,sizeof(int));
475  }
476  MPIBroadcast_integer(&(sim[n].solver.nvars) ,1 ,0,&(sim[n].mpi.world));
477  MPIBroadcast_integer( sim[n].solver.dim_global ,sim[n].solver.ndims,0,&(sim[n].mpi.world));
478  MPIBroadcast_integer( sim[n].solver.dim_global_ex ,sim[n].solver.ndims,0,&(sim[n].mpi.world));
479  MPIBroadcast_integer( sim[n].mpi.iproc ,sim[n].solver.ndims,0,&(sim[n].mpi.world));
480  MPIBroadcast_integer(&(sim[n].mpi.N_IORanks) ,1 ,0,&(sim[n].mpi.world));
481  MPIBroadcast_integer(&(sim[n].solver.ghosts) ,1 ,0,&(sim[n].mpi.world));
482  MPIBroadcast_integer(&(sim[n].solver.n_iter) ,1 ,0,&(sim[n].mpi.world));
483  MPIBroadcast_integer(&(sim[n].solver.restart_iter) ,1 ,0,&(sim[n].mpi.world));
484  MPIBroadcast_integer(&(sim[n].solver.screen_op_iter),1 ,0,&(sim[n].mpi.world));
485  MPIBroadcast_integer(&(sim[n].solver.file_op_iter) ,1 ,0,&(sim[n].mpi.world));
486  MPIBroadcast_integer(&(sim[n].solver.flag_ib) ,1 ,0,&(sim[n].mpi.world));
487 #if defined(HAVE_CUDA)
488  MPIBroadcast_integer(&(sim[n].solver.use_gpu) ,1 ,0,&(sim[n].mpi.world));
489  MPIBroadcast_integer(&(sim[n].solver.gpu_device_no) ,1 ,0,&(sim[n].mpi.world));
490 #endif
491  MPIBroadcast_character(sim[n].solver.time_scheme ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
492  MPIBroadcast_character(sim[n].solver.time_scheme_type ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
493  MPIBroadcast_character(sim[n].solver.spatial_scheme_hyp ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
494  MPIBroadcast_character(sim[n].solver.interp_type ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
495  MPIBroadcast_character(sim[n].solver.spatial_type_par ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
496  MPIBroadcast_character(sim[n].solver.spatial_scheme_par ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
497  MPIBroadcast_character(sim[n].solver.ConservationCheck ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
498  MPIBroadcast_character(sim[n].solver.SplitHyperbolicFlux,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
499  MPIBroadcast_character(sim[n].solver.op_file_format ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
500  MPIBroadcast_character(sim[n].solver.ip_file_type ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
501  MPIBroadcast_character(sim[n].solver.input_mode ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
502  MPIBroadcast_character(sim[n].solver.output_mode ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
503  MPIBroadcast_character(sim[n].solver.op_overwrite ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
504  MPIBroadcast_character(sim[n].solver.plot_solution ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
505  MPIBroadcast_character(sim[n].solver.model ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
506  MPIBroadcast_character(sim[n].solver.ib_filename ,_MAX_STRING_SIZE_,0,&(sim[n].mpi.world));
507 
508  MPIBroadcast_double(&(sim[n].solver.dt),1,0,&(sim[n].mpi.world));
509  }
510 #endif
511 
512  return 0;
513 }
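
For reference, the following is a minimal sketch (not taken from HyPar's own driver sources) of how ReadInputs() and WriteInputs() might be called; the SimulationObject array, nsims, and the MPI rank are assumed to have been set up by the caller, and the function declarations come from simulation.h:

    /* Hypothetical calling sequence; HyPar's actual driver may differ. */
    #include <simulation_object.h>

    int SetupInputs(SimulationObject* sim, int nsims, int rank)
    {
      /* Rank 0 reads solver.inp and broadcasts the inputs to all ranks. */
      if (ReadInputs((void*) sim, nsims, rank)) {
        /* Nonzero return: solver.inp is missing or malformed. */
        return 1;
      }
      /* Rank 0 prints the inputs that were just read. */
      WriteInputs((void*) sim, nsims, rank);
      return 0;
    }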

◆ WriteInputs()

int WriteInputs ( void *  s,
int  nsims,
int  rank 
)

Write the input parameters

Print (on rank 0) the simulation inputs that were read from the file solver.inp.

Parameters
s      Array of simulation objects of type SimulationObject of size nsims
nsims  Number of simulation objects
rank   MPI rank of this process

Definition at line 15 of file WriteInputs.c.

20 {
21  SimulationObject* sim = (SimulationObject*) s;
22  int n;
23 
24  if (sim == NULL) return 0;
25 
26  if (!rank) {
27 
28  printf(" No. of dimensions : %d\n",sim[0].solver.ndims);
29  printf(" No. of variables : %d\n",sim[0].solver.nvars);
30  if (nsims > 1) {
31  printf(" Domain sizes:\n");
32  for (int n = 0; n < nsims; n++) {
33  printf(" domain %3d - ", n);
34  for (int i=0; i<sim[n].solver.ndims; i++) printf ("%d ",sim[n].solver.dim_global[i]);
35  printf("\n");
36  }
37 #ifndef serial
38  printf(" Processes along each dimension:\n");
39  for (int n = 0; n < nsims; n++) {
40  printf(" domain %3d - ", n);
41  for (int i=0; i<sim[n].solver.ndims; i++) printf ("%d ",sim[n].mpi.iproc[i]);
42  printf("\n");
43  }
44 #endif
45  printf(" Exact solution domain sizes:\n");
46  for (int n = 0; n < nsims; n++) {
47  printf(" domain %3d - ", n);
48  for (int i=0; i<sim[n].solver.ndims; i++) printf ("%d ",sim[n].solver.dim_global_ex[i]);
49  printf("\n");
50  }
51  } else {
52  printf(" Domain size : ");
53  for (int i=0; i<sim[0].solver.ndims; i++) printf ("%d ",sim[0].solver.dim_global[i]);
54  printf("\n");
55 #ifndef serial
56  printf(" Processes along each dimension : ");
57  for (int i=0; i<sim[0].solver.ndims; i++) printf ("%d ",sim[0].mpi.iproc[i]);
58  printf("\n");
59 #endif
60  printf(" Exact solution domain size : ");
61  for (int i=0; i<sim[0].solver.ndims; i++) printf ("%d ",sim[0].solver.dim_global_ex[i]);
62  printf("\n");
63  }
64  printf(" No. of ghosts pts : %d\n" ,sim[0].solver.ghosts );
65  printf(" No. of iter. : %d\n" ,sim[0].solver.n_iter );
66  printf(" Restart iteration : %d\n" ,sim[0].solver.restart_iter );
67 #ifdef with_petsc
68  if (sim[0].solver.use_petscTS)
69  printf(" Time integration scheme : PETSc \n" );
70  else {
71  printf(" Time integration scheme : %s ",sim[0].solver.time_scheme );
72  if (strcmp(sim[0].solver.time_scheme,_FORWARD_EULER_)) {
73  printf("(%s)",sim[0].solver.time_scheme_type );
74  }
75  printf("\n");
76  }
77 #else
78  printf(" Time integration scheme : %s ",sim[0].solver.time_scheme );
79  if (strcmp(sim[0].solver.time_scheme,_FORWARD_EULER_)) {
80  printf("(%s)",sim[0].solver.time_scheme_type );
81  }
82  printf("\n");
83 #endif
84  printf(" Spatial discretization scheme (hyperbolic) : %s\n" ,sim[0].solver.spatial_scheme_hyp );
85  printf(" Split hyperbolic flux term? : %s\n" ,sim[0].solver.SplitHyperbolicFlux );
86  printf(" Interpolation type for hyperbolic term : %s\n" ,sim[0].solver.interp_type );
87  printf(" Spatial discretization type (parabolic ) : %s\n" ,sim[0].solver.spatial_type_par );
88  printf(" Spatial discretization scheme (parabolic ) : %s\n" ,sim[0].solver.spatial_scheme_par );
89  printf(" Time Step : %E\n" ,sim[0].solver.dt );
90  printf(" Check for conservation : %s\n" ,sim[0].solver.ConservationCheck );
91  printf(" Screen output iterations : %d\n" ,sim[0].solver.screen_op_iter );
92  printf(" File output iterations : %d\n" ,sim[0].solver.file_op_iter );
93  printf(" Initial solution file type : %s\n" ,sim[0].solver.ip_file_type );
94  printf(" Initial solution read mode : %s" ,sim[0].solver.input_mode );
95  if (strcmp(sim[0].solver.input_mode,"serial")) printf(" [%d file IO rank(s)]\n",sim[0].mpi.N_IORanks );
96  else printf("\n");
97  printf(" Solution file write mode : %s" ,sim[0].solver.output_mode );
98  if (strcmp(sim[0].solver.output_mode,"serial")) printf(" [%d file IO rank(s)]\n",sim[0].mpi.N_IORanks );
99  else printf("\n");
100  printf(" Solution file format : %s\n" ,sim[0].solver.op_file_format );
101  printf(" Overwrite solution file : %s\n" ,sim[0].solver.op_overwrite );
102 #if defined(HAVE_CUDA)
103  printf(" Use GPU : %s\n" ,(sim[0].solver.use_gpu == 1)? "yes" : "no");
104  printf(" GPU device no : %d\n" ,(sim[0].solver.gpu_device_no));
105 #endif
106  printf(" Physical model : %s\n" ,sim[0].solver.model );
107  if (sim[0].solver.flag_ib) {
108  printf(" Immersed Body : %s\n" ,sim[0].solver.ib_filename );
109  }
110  }
111 
112  return 0;
113 }

◆ Initialize()

int Initialize ( void *  s,
int  nsims 
)

Initialize the solver

Initialization function called at the beginning of a simulation. This function does the following:

  • allocates memory for MPI related arrays
  • initializes the values for MPI variables
  • creates sub-communicators and communication groups
  • allocates memory for arrays to store solution, right-hand-side, flux, and other working vectors.
  • initializes function counters to zero
Parameters
s      Array of simulation objects of type SimulationObject
nsims  Number of simulation objects

Definition at line 26 of file Initialize.c.

29 {
30  SimulationObject* simobj = (SimulationObject*) s;
31  int i,d,n;
32 
33  if (nsims == 0) {
34  return 1;
35  }
36 
37 #if defined(HAVE_CUDA)
38  if (simobj[0].solver.use_gpu && (simobj[0].solver.gpu_device_no >= 0)) {
39  gpuSetDevice(simobj[0].solver.gpu_device_no);
40  }
41 #endif
42 
43  if (!simobj[0].mpi.rank) printf("Partitioning domain and allocating data arrays.\n");
44 
45  for (n = 0; n < nsims; n++) {
46 
47  /* this is a full initialization, not a barebones one */
48  simobj[n].is_barebones = 0;
49 
50  /* allocations */
51  simobj[n].mpi.ip = (int*) calloc (simobj[n].solver.ndims,sizeof(int));
52  simobj[n].mpi.is = (int*) calloc (simobj[n].solver.ndims,sizeof(int));
53  simobj[n].mpi.ie = (int*) calloc (simobj[n].solver.ndims,sizeof(int));
54  simobj[n].mpi.bcperiodic = (int*) calloc (simobj[n].solver.ndims,sizeof(int));
55  simobj[n].solver.dim_local = (int*) calloc (simobj[n].solver.ndims,sizeof(int));
56  simobj[n].solver.isPeriodic= (int*) calloc (simobj[n].solver.ndims,sizeof(int));
57 
58 #if defined(HAVE_CUDA)
59  simobj[n].mpi.wctime = 0;
60  simobj[n].mpi.wctime_total = 0;
61 #endif
62 
63 #ifndef serial
64  _DECLARE_IERR_;
65 
66  /* Domain partitioning */
67  int total_proc = 1;
68  for (i=0; i<simobj[n].solver.ndims; i++) total_proc *= simobj[n].mpi.iproc[i];
69  if (simobj[n].mpi.nproc != total_proc) {
70  fprintf(stderr,"Error on rank %d: total number of processes is not consistent ", simobj[n].mpi.rank);
71  fprintf(stderr,"with number of processes along each dimension.\n");
72  if (nsims > 1) fprintf(stderr,"for domain %d.\n", n);
73  fprintf(stderr,"mpiexec was called with %d processes, ",simobj[n].mpi.nproc);
74  fprintf(stderr,"total number of processes from \"solver.inp\" is %d.\n", total_proc);
75  return(1);
76  }
77 
78  /* calculate ndims-D rank of each process (ip[]) from rank in MPI_COMM_WORLD */
79  IERR MPIRanknD( simobj[n].solver.ndims,
80  simobj[n].mpi.rank,
81  simobj[n].mpi.iproc,
82  simobj[n].mpi.ip); CHECKERR(ierr);
83 
84  /* calculate local domain sizes along each dimension */
85  for (i=0; i<simobj[n].solver.ndims; i++) {
86  simobj[n].solver.dim_local[i] = MPIPartition1D( simobj[n].solver.dim_global[i],
87  simobj[n].mpi.iproc[i],
88  simobj[n].mpi.ip[i] );
89  }
90 
91  /* calculate local domain limits in terms of global domain */
92  IERR MPILocalDomainLimits( simobj[n].solver.ndims,
93  simobj[n].mpi.rank,
94  &(simobj[n].mpi),
95  simobj[n].solver.dim_global,
96  simobj[n].mpi.is,
97  simobj[n].mpi.ie );
98  CHECKERR(ierr);
99 
100  /* create sub-communicators for parallel computations along grid lines in each dimension */
101  IERR MPICreateCommunicators(simobj[n].solver.ndims,&(simobj[n].mpi)); CHECKERR(ierr);
102 
103  /* initialize periodic BC flags to zero */
104  for (i=0; i<simobj[n].solver.ndims; i++) simobj[n].mpi.bcperiodic[i] = 0;
105 
106  /* create communication groups */
107  IERR MPICreateIOGroups(&(simobj[n].mpi)); CHECKERR(ierr);
108 
109 #else
110 
111  for (i=0; i<simobj[n].solver.ndims; i++) {
112  simobj[n].mpi.ip[i] = 0;
113  simobj[n].solver.dim_local[i] = simobj[n].solver.dim_global[i];
114  simobj[n].mpi.iproc[i] = 1;
115  simobj[n].mpi.is[i] = 0;
116  simobj[n].mpi.ie[i] = simobj[n].solver.dim_local[i];
117  simobj[n].mpi.bcperiodic[i] = 0;
118  }
119 
120 #endif
121 
122  simobj[n].solver.npoints_global
123  = simobj[n].solver.npoints_local
124  = simobj[n].solver.npoints_local_wghosts
125  = 1;
126  for (i=0; i<simobj[n].solver.ndims; i++) {
127  simobj[n].solver.npoints_global *= simobj[n].solver.dim_global[i];
128  simobj[n].solver.npoints_local *= simobj[n].solver.dim_local [i];
129  simobj[n].solver.npoints_local_wghosts *= (simobj[n].solver.dim_local[i]+2*simobj[n].solver.ghosts);
130  }
131 
132  /* Allocations */
133  simobj[n].solver.index = (int*) calloc ((short)simobj[n].solver.ndims,sizeof(int));
134  simobj[n].solver.stride_with_ghosts = (int*) calloc ((short)simobj[n].solver.ndims,sizeof(int));
135  simobj[n].solver.stride_without_ghosts = (int*) calloc ((short)simobj[n].solver.ndims,sizeof(int));
136  int accu1 = 1, accu2 = 1;
137  for (i=0; i<simobj[n].solver.ndims; i++) {
138  simobj[n].solver.stride_with_ghosts[i] = accu1;
139  simobj[n].solver.stride_without_ghosts[i] = accu2;
140  accu1 *= (simobj[n].solver.dim_local[i]+2*simobj[n].solver.ghosts);
141  accu2 *= simobj[n].solver.dim_local[i];
142  }
143 
144 #if defined(HAVE_CUDA)
145  if (simobj[n].solver.use_gpu) {
146  gpuMalloc((void**)&simobj[n].solver.gpu_dim_local, simobj[n].solver.ndims*sizeof(int));
147  gpuMemcpy( simobj[n].solver.gpu_dim_local,
148  simobj[n].solver.dim_local,
149  simobj[n].solver.ndims*sizeof(int),
150  gpuMemcpyHostToDevice);
151  }
152 #endif
153 
154  /* state variables */
155  int size = 1;
156  for (i=0; i<simobj[n].solver.ndims; i++) {
157  size *= (simobj[n].solver.dim_local[i]+2*simobj[n].solver.ghosts);
158  }
159  simobj[n].solver.ndof_cells_wghosts = simobj[n].solver.nvars*size;
160  simobj[n].solver.u = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
161 #if defined(HAVE_CUDA)
162  if (simobj[n].solver.use_gpu) {
163  gpuMalloc((void**)&simobj[n].solver.gpu_u, simobj[n].solver.nvars*size*sizeof(double));
164  gpuMemset(simobj[n].solver.gpu_u, 0, simobj[n].solver.nvars*size*sizeof(double));
165  }
166 #endif
167 #ifdef with_petsc
168  if (simobj[n].solver.use_petscTS) {
169  simobj[n].solver.u0 = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
170  simobj[n].solver.uref = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
171  simobj[n].solver.rhsref = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
172  simobj[n].solver.rhs = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
173  } else simobj[n].solver.u0 = simobj[n].solver.uref = simobj[n].solver.rhsref = simobj[n].solver.rhs = NULL;
174 #endif
175 #ifdef with_librom
176  simobj[n].solver.u_rom_predicted = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
177 #endif
178  simobj[n].solver.hyp = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
179  simobj[n].solver.par = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
180  simobj[n].solver.source = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
181  simobj[n].solver.iblank = (double*) calloc (size ,sizeof(double));
182 
183 #if defined(HAVE_CUDA)
184  if (simobj[n].solver.use_gpu) {
185  gpuMalloc((void**)&simobj[n].solver.hyp, simobj[n].solver.nvars*size*sizeof(double));
186  gpuMalloc((void**)&simobj[n].solver.par, simobj[n].solver.nvars*size*sizeof(double));
187  gpuMalloc((void**)&simobj[n].solver.source, simobj[n].solver.nvars*size*sizeof(double));
188  gpuMemset(simobj[n].solver.hyp, 0, simobj[n].solver.nvars*size*sizeof(double));
189  gpuMemset(simobj[n].solver.par, 0, simobj[n].solver.nvars*size*sizeof(double));
190  gpuMemset(simobj[n].solver.source, 0, simobj[n].solver.nvars*size*sizeof(double));
191  } else {
192 #endif
193  simobj[n].solver.hyp = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
194  simobj[n].solver.par = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
195  simobj[n].solver.source = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
196 #if defined(HAVE_CUDA)
197  }
198 #endif
199 
200  simobj[n].solver.iblank = (double*) calloc (size,sizeof(double));
201 #if defined(HAVE_CUDA)
202  if (simobj[n].solver.use_gpu) {
203  gpuMalloc((void**)&simobj[n].solver.gpu_iblank, size*sizeof(double));
204  gpuMemset(simobj[n].solver.gpu_iblank, 0, size*sizeof(double));
205  }
206 #endif
207 
208  /* grid */
209  size = 0;
210  for (i=0; i<simobj[n].solver.ndims; i++) {
211  size += (simobj[n].solver.dim_local[i]+2*simobj[n].solver.ghosts);
212  }
213  simobj[n].solver.x = (double*) calloc (size,sizeof(double));
214  simobj[n].solver.dxinv = (double*) calloc (size,sizeof(double));
215  simobj[n].solver.size_x = size;
216 #if defined(HAVE_CUDA)
217  if (simobj[n].solver.use_gpu) {
218  gpuMalloc((void**)&simobj[n].solver.gpu_x, size*sizeof(double));
219  gpuMalloc((void**)&simobj[n].solver.gpu_dxinv, size*sizeof(double));
220  gpuMemset(simobj[n].solver.gpu_x, 0, size*sizeof(double));
221  gpuMemset(simobj[n].solver.gpu_dxinv, 0, size*sizeof(double));
222  }
223 #endif
224 
225  /* cell-centered arrays needed to compute fluxes */
226  size = 1;
227  for (i=0; i<simobj[n].solver.ndims; i++) {
228  size *= (simobj[n].solver.dim_local[i]+2*simobj[n].solver.ghosts);
229  }
230 #if defined(HAVE_CUDA)
231  if (simobj[n].solver.use_gpu) {
232  gpuMalloc((void**)&simobj[n].solver.fluxC, simobj[n].solver.nvars*size*sizeof(double));
233  gpuMalloc((void**)&simobj[n].solver.uC, simobj[n].solver.nvars*size*sizeof(double));
234  gpuMalloc((void**)&simobj[n].solver.Deriv1, simobj[n].solver.nvars*size*sizeof(double));
235  gpuMalloc((void**)&simobj[n].solver.Deriv2, simobj[n].solver.nvars*size*sizeof(double));
236  gpuMemset(simobj[n].solver.fluxC, 0, simobj[n].solver.nvars*size*sizeof(double));
237  gpuMemset(simobj[n].solver.uC, 0, simobj[n].solver.nvars*size*sizeof(double));
238  gpuMemset(simobj[n].solver.Deriv1, 0, simobj[n].solver.nvars*size*sizeof(double));
239  gpuMemset(simobj[n].solver.Deriv2, 0, simobj[n].solver.nvars*size*sizeof(double));
240  } else {
241 #endif
242  simobj[n].solver.uC = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
243  simobj[n].solver.fluxC = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
244  simobj[n].solver.Deriv1 = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
245  simobj[n].solver.Deriv2 = (double*) calloc (simobj[n].solver.nvars*size,sizeof(double));
246 #if defined(HAVE_CUDA)
247  }
248 #endif
249 
250  /* node-centered arrays needed to compute fluxes */
251  size = 1; for (i=0; i<simobj[n].solver.ndims; i++) size *= (simobj[n].solver.dim_local[i]+1);
252  size *= simobj[n].solver.nvars;
253  simobj[n].solver.ndof_nodes = size;
254 #if defined(HAVE_CUDA)
255  if (simobj[n].solver.use_gpu) {
256  gpuMalloc((void**)&simobj[n].solver.fluxI, size*sizeof(double));
257  gpuMalloc((void**)&simobj[n].solver.uL, size*sizeof(double));
258  gpuMalloc((void**)&simobj[n].solver.uR, size*sizeof(double));
259  gpuMalloc((void**)&simobj[n].solver.fL, size*sizeof(double));
260  gpuMalloc((void**)&simobj[n].solver.fR, size*sizeof(double));
261  gpuMemset(simobj[n].solver.fluxI, 0, size*sizeof(double));
262  gpuMemset(simobj[n].solver.uL, 0, size*sizeof(double));
263  gpuMemset(simobj[n].solver.uR, 0, size*sizeof(double));
264  gpuMemset(simobj[n].solver.fL, 0, size*sizeof(double));
265  gpuMemset(simobj[n].solver.fR, 0, size*sizeof(double));
266  } else {
267 #endif
268  simobj[n].solver.fluxI = (double*) calloc (size,sizeof(double));
269  simobj[n].solver.uL = (double*) calloc (size,sizeof(double));
270  simobj[n].solver.uR = (double*) calloc (size,sizeof(double));
271  simobj[n].solver.fL = (double*) calloc (size,sizeof(double));
272  simobj[n].solver.fR = (double*) calloc (size,sizeof(double));
273 #if defined(HAVE_CUDA)
274  }
275 #endif
276 
277  /* allocate MPI send/receive buffer arrays */
278  int bufdim[simobj[n].solver.ndims], maxbuf = 0;
279  for (d = 0; d < simobj[n].solver.ndims; d++) {
280  bufdim[d] = 1;
281  for (i = 0; i < simobj[n].solver.ndims; i++) {
282  if (i == d) bufdim[d] *= simobj[n].solver.ghosts;
283  else bufdim[d] *= simobj[n].solver.dim_local[i];
284  }
285  if (bufdim[d] > maxbuf) maxbuf = bufdim[d];
286  }
287  maxbuf *= (simobj[n].solver.nvars*simobj[n].solver.ndims);
288  simobj[n].mpi.maxbuf = maxbuf;
289  simobj[n].mpi.sendbuf = (double*) calloc (2*simobj[n].solver.ndims*maxbuf,sizeof(double));
290  simobj[n].mpi.recvbuf = (double*) calloc (2*simobj[n].solver.ndims*maxbuf,sizeof(double));
291 #if defined(HAVE_CUDA)
292  if (simobj[n].solver.use_gpu) {
293  simobj[n].mpi.cpu_dim = (int *) calloc(simobj[n].solver.ndims, sizeof(int));
294  _ArrayCopy1D_(simobj[n].solver.dim_local, simobj[n].mpi.cpu_dim, simobj[n].solver.ndims);
295  gpuMalloc((void**)&simobj[n].mpi.gpu_sendbuf, 2*simobj[n].solver.ndims*simobj[n].mpi.maxbuf*sizeof(double));
296  gpuMalloc((void**)&simobj[n].mpi.gpu_recvbuf, 2*simobj[n].solver.ndims*simobj[n].mpi.maxbuf*sizeof(double));
297  gpuMemset(simobj[n].mpi.gpu_sendbuf, 0, 2*simobj[n].solver.ndims*simobj[n].mpi.maxbuf*sizeof(double));
298  gpuMemset(simobj[n].mpi.gpu_recvbuf, 0, 2*simobj[n].solver.ndims*simobj[n].mpi.maxbuf*sizeof(double));
299  }
300 #endif
301 
302  /* allocate the volume and boundary integral arrays */
303  simobj[n].solver.VolumeIntegral = (double*) calloc (simobj[n].solver.nvars ,sizeof(double));
304  simobj[n].solver.VolumeIntegralInitial = (double*) calloc (simobj[n].solver.nvars ,sizeof(double));
305  simobj[n].solver.TotalBoundaryIntegral = (double*) calloc (simobj[n].solver.nvars,sizeof(double));
306  simobj[n].solver.ConservationError = (double*) calloc (simobj[n].solver.nvars,sizeof(double));
307  for (i=0; i<simobj[n].solver.nvars; i++) simobj[n].solver.ConservationError[i] = -1;
308 #if defined(HAVE_CUDA)
309  if (simobj[n].solver.use_gpu) {
310  int total_offset = 0;
311  for (d=0; d<simobj[n].solver.ndims; d++) {
312  simobj[n].solver.gpu_npoints_boundary_offset[d] = total_offset;
313  simobj[n].solver.gpu_npoints_boundary[d] = 1;
314 
315  for (i=0; i<simobj[n].solver.ndims; i++) {
316  if (i != d) simobj[n].solver.gpu_npoints_boundary[d] *= simobj[n].solver.dim_local[i];
317  }
318  total_offset += 2*simobj[n].solver.gpu_npoints_boundary[d];
319  }
320  simobj[n].solver.StageBoundaryBuffer_size = (total_offset*simobj[n].solver.nvars);
321  gpuMalloc((void**)&simobj[n].solver.StageBoundaryBuffer, simobj[n].solver.StageBoundaryBuffer_size*sizeof(double));
322  gpuMemset(simobj[n].solver.StageBoundaryBuffer, 0, simobj[n].solver.StageBoundaryBuffer_size*sizeof(double));
323 
324  size = 2*simobj[n].solver.ndims*simobj[n].solver.nvars;
325  gpuMalloc((void**)&simobj[n].solver.StageBoundaryIntegral, size*sizeof(double));
326  gpuMalloc((void**)&simobj[n].solver.StepBoundaryIntegral, size*sizeof(double));
327  gpuMemset(simobj[n].solver.StageBoundaryIntegral, 0, size*sizeof(double));
328  gpuMemset(simobj[n].solver.StepBoundaryIntegral, 0, size*sizeof(double));
329  } else {
330 #endif
331  simobj[n].solver.StageBoundaryIntegral = (double*) calloc (2*simobj[n].solver.ndims*simobj[n].solver.nvars,sizeof(double));
332  simobj[n].solver.StepBoundaryIntegral = (double*) calloc (2*simobj[n].solver.ndims*simobj[n].solver.nvars,sizeof(double));
333 #if defined(HAVE_CUDA)
334  }
335 #endif
336 
337  /* initialize function call counts to zero */
338  simobj[n].solver.count_hyp
339  = simobj[n].solver.count_par
340  = simobj[n].solver.count_sou
341  = 0;
342 #ifdef with_petsc
343  simobj[n].solver.count_RHSFunction
344  = simobj[n].solver.count_IFunction
345  = simobj[n].solver.count_IJacobian
346  = simobj[n].solver.count_IJacFunction
347  = 0;
348 #endif
349 
350  /* Initialize iblank to 1*/
351  _ArraySetValue_(simobj[n].solver.iblank,simobj[n].solver.npoints_local_wghosts,1);
352 #if defined(HAVE_CUDA)
353  if (simobj[n].solver.use_gpu) {
354  gpuArraySetValue(simobj[n].solver.gpu_iblank, simobj[n].solver.npoints_local_wghosts, 1.0);
355  }
356 #endif
357 
358  }
359 
360  return 0;
361 }
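
As an aside, the stride arrays set up above determine how a multi-dimensional grid index maps into the flattened (1D) solution arrays: the stride of dimension i is the product of the ghost-padded sizes of all lower dimensions. A small standalone sketch of this layout, using made-up local sizes rather than HyPar's data structures:

    /* Standalone illustration of the stride/index layout computed in Initialize()
       (the dimension sizes here are hypothetical). */
    #include <stdio.h>

    int main(void)
    {
      int ndims = 2, ghosts = 3;
      int dim_local[2] = {16, 8};  /* local grid size, excluding ghost points */
      int stride_wg[2];            /* strides including ghost points */
      int accu = 1;
      for (int d = 0; d < ndims; d++) {
        stride_wg[d] = accu;
        accu *= (dim_local[d] + 2*ghosts);
      }
      /* 1D location of interior point (i,j), offset past the ghost layers */
      int i = 4, j = 2;
      int p = (i + ghosts)*stride_wg[0] + (j + ghosts)*stride_wg[1];
      printf("point (%d,%d) -> index %d of %d\n", i, j, p, accu);
      return 0;
    }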

◆ Initialize_GPU()

int Initialize_GPU ( void *  ,
int   
)

Initialize the solver

◆ InitialSolution()

int InitialSolution ( void *  s,
int  nsims 
)

Read the initial solution

Read in the initial solution from a file, and compute the grid spacing and the volume integral of the initial solution.

Parameters
s      Array of simulation objects of type SimulationObject
nsims  Number of simulation objects

Definition at line 25 of file InitialSolution.c.

28 {
29  SimulationObject* simobj = (SimulationObject*) s;
30  int n, flag, d, i, offset, ierr;
31 
32  for (n = 0; n < nsims; n++) {
33 
34  int ghosts = simobj[n].solver.ghosts;
35 
36  char fname_root[_MAX_STRING_SIZE_] = "initial";
37  if (nsims > 1) {
38  char index[_MAX_STRING_SIZE_];
39  GetStringFromInteger(n, index, (int)log10(nsims)+1);
40  strcat(fname_root, "_");
41  strcat(fname_root, index);
42  }
43 
44  ierr = ReadArray( simobj[n].solver.ndims,
45  simobj[n].solver.nvars,
46  simobj[n].solver.dim_global,
47  simobj[n].solver.dim_local,
48  simobj[n].solver.ghosts,
49  &(simobj[n].solver),
50  &(simobj[n].mpi),
51  simobj[n].solver.x,
52  simobj[n].solver.u,
53  fname_root,
54  &flag );
55  if (ierr) {
56  fprintf(stderr, "Error in InitialSolution() on rank %d.\n",
57  simobj[n].mpi.rank);
58  return ierr;
59  }
60  if (!flag) {
61  fprintf(stderr,"Error: initial solution file not found.\n");
62  return(1);
63  }
64  CHECKERR(ierr);
65 
66  /* exchange MPI-boundary values of u between processors */
67  MPIExchangeBoundariesnD( simobj[n].solver.ndims,
68  simobj[n].solver.nvars,
69  simobj[n].solver.dim_local,
70  simobj[n].solver.ghosts,
71  &(simobj[n].mpi),
72  simobj[n].solver.u );
73 
74  /* calculate dxinv */
75  offset = 0;
76  for (d = 0; d < simobj[n].solver.ndims; d++) {
77  for (i = 0; i < simobj[n].solver.dim_local[d]; i++) {
78  simobj[n].solver.dxinv[i+offset+ghosts]
79  = 2.0 / (simobj[n].solver.x[i+1+offset+ghosts]-simobj[n].solver.x[i-1+offset+ghosts]);
80  }
81  offset += (simobj[n].solver.dim_local[d] + 2*ghosts);
82  }
83 
84  /* exchange MPI-boundary values of dxinv between processors */
85  offset = 0;
86  for (d = 0; d < simobj[n].solver.ndims; d++) {
87  ierr = MPIExchangeBoundaries1D( &(simobj[n].mpi),
88  &(simobj[n].solver.dxinv[offset]),
89  simobj[n].solver.dim_local[d],
90  ghosts,
91  d,
92  simobj[n].solver.ndims ); CHECKERR(ierr);
93  if (ierr) {
94  fprintf(stderr, "Error in InitialSolution() on rank %d.\n",
95  simobj[n].mpi.rank);
96  return ierr;
97  }
98  offset += (simobj[n].solver.dim_local[d] + 2*ghosts);
99  }
100 
101  /* fill in ghost values of dxinv at physical boundaries by extrapolation */
102  offset = 0;
103  for (d = 0; d < simobj[n].solver.ndims; d++) {
104  double *dxinv = &(simobj[n].solver.dxinv[offset]);
105  int *dim = simobj[n].solver.dim_local;
106  if (simobj[n].mpi.ip[d] == 0) {
107  /* fill left boundary along this dimension */
108  for (i = 0; i < ghosts; i++) dxinv[i] = dxinv[ghosts];
109  }
110  if (simobj[n].mpi.ip[d] == simobj[n].mpi.iproc[d]-1) {
111  /* fill right boundary along this dimension */
112  for (i = dim[d]+ghosts; i < dim[d]+2*ghosts; i++) dxinv[i] = dxinv[dim[d]+ghosts-1];
113  }
114  offset += (dim[d] + 2*ghosts);
115  }
116 
117  /* calculate volume integral of the initial solution */
118  ierr = VolumeIntegral( simobj[n].solver.VolumeIntegralInitial,
119  simobj[n].solver.u,
120  &(simobj[n].solver),
121  &(simobj[n].mpi) ); CHECKERR(ierr);
122  if (ierr) {
123  fprintf(stderr, "Error in InitialSolution() on rank %d.\n",
124  simobj[n].mpi.rank);
125  return ierr;
126  }
127  if (!simobj[n].mpi.rank) {
128  if (nsims > 1) printf("Volume integral of the initial solution on domain %d:\n", n);
129  else printf("Volume integral of the initial solution:\n");
130  for (d=0; d<simobj[n].solver.nvars; d++) {
131  printf("%2d: %1.16E\n",d,simobj[n].solver.VolumeIntegralInitial[d]);
132  }
133  }
134  /* Set initial total boundary flux integral to zero */
135  _ArraySetValue_(simobj[n].solver.TotalBoundaryIntegral,simobj[n].solver.nvars,0);
136 
137  }
138 
139 #if defined(HAVE_CUDA)
140  if (simobj[0].solver.use_gpu) {
141  for (int n = 0; n < nsims; n++) {
142  int npoints_local_wghosts = simobj[n].solver.npoints_local_wghosts;
143  int nvars = simobj[n].solver.nvars;
144  int size_x = simobj[n].solver.size_x;
145 
146  gpuMemcpy(simobj[n].solver.gpu_x,
147  simobj[n].solver.x, simobj[n].solver.size_x*sizeof(double),
148  gpuMemcpyHostToDevice);
149  gpuMemcpy(simobj[n].solver.gpu_dxinv, simobj[n].solver.dxinv,
150  simobj[n].solver.size_x*sizeof(double),
151  gpuMemcpyHostToDevice);
152 
153 #ifdef CUDA_VAR_ORDERDING_AOS
154  gpuMemcpy(simobj[n].solver.gpu_u,
155  simobj[n].solver.u,
156  simobj[n].solver.ndof_cells_wghosts*sizeof(double),
157  gpuMemcpyHostToDevice);
158 #else
159  double *h_u = (double *) malloc(simobj[n].solver.ndof_cells_wghosts*sizeof(double));
160  for (int i=0; i<npoints_local_wghosts; i++) {
161  for (int v=0; v<nvars; v++) {
162  h_u[i+v*npoints_local_wghosts] = simobj[n].solver.u[i*nvars+v];
163  }
164  }
165  gpuMemcpy(simobj[n].solver.gpu_u,
166  h_u,
167  simobj[n].solver.ndof_cells_wghosts*sizeof(double),
168  gpuMemcpyHostToDevice);
169  free(h_u);
170 #endif
171  }
172  }
173 #endif
174 
175  return 0;
176 }
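
Note that the grid-spacing array computed above stores, at each interior point, the reciprocal of a central spacing; in LaTeX notation, the loop over the interior points evaluates

    \[ \left(\Delta x_i\right)^{-1} = \frac{2}{x_{i+1} - x_{i-1}} \]

with ghost-point values of dxinv obtained through MPI exchanges at internal (processor) boundaries and by copying the nearest interior value at physical boundaries.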

◆ InitializeBoundaries()

int InitializeBoundaries ( void *  s,
int  nsims 
)

Initialize the boundary conditions

This function initializes the variables and functions related to implementing the boundary conditions.

  • Rank 0 reads in the boundary conditions file and broadcasts the information to all processors.
  • Depending on the type of boundary, additional information is read in. For example, for a Dirichlet boundary, the Dirichlet value is read in.
  • Allocate and initialize arrays and variables related to implementing the boundary conditions.
  • Each rank finds out if the subdomain it owns abuts any of the boundaries specified.

Note that boundary conditions are implemented as boundary objects of the type DomainBoundary.
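
As an illustration (inferred from the reading loop shown below, not taken from a shipped example), a hypothetical boundary.inp for a 2D domain spanning [0,1] x [0,1] with all four boundaries periodic might look like:

    4
    periodic    0    1    0.0 0.0    0.0 1.0
    periodic    0   -1    1.0 1.0    0.0 1.0
    periodic    1    1    0.0 1.0    0.0 0.0
    periodic    1   -1    0.0 1.0    1.0 1.0

The first line is the number of boundary zones; each subsequent line gives the boundary type, the dimension it applies to, the face (+1 or -1 selects one of the two faces along that dimension), and an xmin/xmax extent pair for every dimension. Types such as Dirichlet, sponge, or the inflow/outflow conditions require additional values, which are read immediately afterwards as shown in the code below.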

Parameters
s      Array of simulation objects of type SimulationObject
nsims  Number of simulation objects

Definition at line 36 of file InitializeBoundaries.c.

39 {
40  SimulationObject* sim = (SimulationObject*) s;
41  int ns;
42  _DECLARE_IERR_;
43 
44  for (ns = 0; ns < nsims; ns++) {
45 
46  DomainBoundary *boundary = NULL;
47  HyPar *solver = &(sim[ns].solver);
48  MPIVariables *mpi = &(sim[ns].mpi);
49  int nb, ferr;
50 
51  /* root process reads boundary condition file */
52  if (!mpi->rank) {
53 
54  char filename[_MAX_STRING_SIZE_] = "boundary";
55  char filename_backup[_MAX_STRING_SIZE_] = "boundary";
56  if (nsims > 1) {
57  char index[_MAX_STRING_SIZE_];
58  GetStringFromInteger(ns, index, (int)log10(nsims)+1);
59  strcat(filename, "_");
60  strcat(filename, index);
61  }
62  strcat(filename, ".inp");
63  strcat(filename_backup, ".inp");
64 
65  FILE *in;
66  in = fopen(filename,"r");
67  if (!in) {
68  in = fopen(filename_backup, "r");
69  if (!in) {
70  fprintf(stderr,"Error: boundary condition file %s or %s not found.\n",
71  filename, filename_backup );
72  return(1);
73  } else {
74  if (nsims > 1) printf("Domain %d: ", ns);
75  printf("Reading boundary conditions from %s.\n", filename_backup);
76  }
77  } else {
78  if (nsims > 1) printf("Domain %d: ", ns);
79  printf("Reading boundary conditions from %s.\n", filename);
80  }
81 
82  /* read number of boundary conditions and allocate */
83  ferr = fscanf(in,"%d",&solver->nBoundaryZones); if (ferr != 1) return(1);
84  boundary = (DomainBoundary*) calloc (solver->nBoundaryZones,sizeof(DomainBoundary));
85  for (nb = 0; nb < solver->nBoundaryZones; nb++) {
86  boundary[nb].DirichletValue = boundary[nb].SpongeValue
87  = boundary[nb].FlowVelocity
88  = boundary[nb].UnsteadyDirichletData
89  = NULL;
90  boundary[nb].UnsteadyDirichletSize = NULL;
91  }
92 
93  /* read each boundary condition */
94  for (nb = 0; nb < solver->nBoundaryZones; nb++) {
95  int d, v;
96  boundary[nb].xmin = (double*) calloc (solver->ndims,sizeof(double)); /* deallocated in BCCleanup.c */
97  boundary[nb].xmax = (double*) calloc (solver->ndims,sizeof(double)); /* deallocated in BCCleanup.c */
98 
99  ferr = fscanf(in,"%s",boundary[nb].bctype); if (ferr != 1) return(1);
100  ferr = fscanf(in,"%d",&boundary[nb].dim ); if (ferr != 1) return(1);
101  ferr = fscanf(in,"%d",&boundary[nb].face ); if (ferr != 1) return(1);
102  for (d=0; d < solver->ndims; d++) {
103  ferr = fscanf(in,"%lf %lf", &boundary[nb].xmin[d], &boundary[nb].xmax[d]);
104  if (ferr != 2) return(1);
105  }
106 
107  /* read in boundary type-specific additional data if required */
108 
109  if (!strcmp(boundary[nb].bctype,_DIRICHLET_)) {
110  boundary[nb].DirichletValue = (double*) calloc (solver->nvars,sizeof(double));
111  /* deallocated in BCCleanup.c */
112  /* read the Dirichlet value for each variable on this boundary */
113  for (v = 0; v < solver->nvars; v++) ferr = fscanf(in,"%lf",&boundary[nb].DirichletValue[v]);
114  }
115 
116  if (!strcmp(boundary[nb].bctype,_SPONGE_)) {
117  boundary[nb].SpongeValue = (double*) calloc (solver->nvars,sizeof(double));
118  /* deallocated in BCCleanup.c */
119  /* read the sponge value for each variable on this boundary */
120  for (v = 0; v < solver->nvars; v++) ferr = fscanf(in,"%lf",&boundary[nb].SpongeValue[v]);
121  }
122 
123  if ( (!strcmp(boundary[nb].bctype,_SLIP_WALL_))
124  || (!strcmp(boundary[nb].bctype,_NOSLIP_WALL_)) ) {
125  boundary[nb].FlowVelocity = (double*) calloc (solver->ndims,sizeof(double));
126  /* deallocated in BCCleanup.c */
127  /* read the wall velocity */
128  for (v = 0; v < solver->ndims; v++) ferr = fscanf(in,"%lf",&boundary[nb].FlowVelocity[v]);
129  }
130 
131  if ( (!strcmp(boundary[nb].bctype,_SW_SLIP_WALL_))
132  || (!strcmp(boundary[nb].bctype,_SW_NOSLIP_WALL_)) ) {
133  boundary[nb].FlowVelocity = (double*) calloc (solver->ndims,sizeof(double));
134  /* deallocated in BCCleanup.c */
135  /* read the wall velocity */
136  for (v = 0; v < solver->ndims; v++) ferr = fscanf(in,"%lf",&boundary[nb].FlowVelocity[v]);
137  }
138 
139  if (!strcmp(boundary[nb].bctype,_SUBSONIC_INFLOW_)) {
140  boundary[nb].FlowVelocity = (double*) calloc (solver->ndims,sizeof(double));
141  /* deallocated in BCCleanup.c */
142  /* read in the inflow density and velocity */
143  ferr = fscanf(in,"%lf",&boundary[nb].FlowDensity);
144  for (v = 0; v < solver->ndims; v++) ferr = fscanf(in,"%lf",&boundary[nb].FlowVelocity[v]);
145  }
146 
147  if (!strcmp(boundary[nb].bctype,_SUBSONIC_OUTFLOW_)) {
148  /* read in the outflow pressure */
149  ferr = fscanf(in,"%lf",&boundary[nb].FlowPressure);
150  }
151 
152  if (!strcmp(boundary[nb].bctype,_SUBSONIC_AMBIVALENT_)) {
153  boundary[nb].FlowVelocity = (double*) calloc (solver->ndims,sizeof(double));
154  /* deallocated in BCCleanup.c */
155  /* read in the inflow density, velocity, and pressure */
156  ferr = fscanf(in,"%lf",&boundary[nb].FlowDensity);
157  for (v = 0; v < solver->ndims; v++) ferr = fscanf(in,"%lf",&boundary[nb].FlowVelocity[v]);
158  ferr = fscanf(in,"%lf",&boundary[nb].FlowPressure);
159  }
160 
161  if (!strcmp(boundary[nb].bctype,_SUPERSONIC_INFLOW_)) {
162  boundary[nb].FlowVelocity = (double*) calloc (solver->ndims,sizeof(double));
163  /* deallocated in BCCleanup.c */
164  /* read in the inflow density, velocity and pressure */
165  ferr = fscanf(in,"%lf",&boundary[nb].FlowDensity);
166  for (v = 0; v < solver->ndims; v++) ferr = fscanf(in,"%lf",&boundary[nb].FlowVelocity[v]);
167  ferr = fscanf(in,"%lf",&boundary[nb].FlowPressure);
168  }
169 
170  if (!strcmp(boundary[nb].bctype,_TURBULENT_SUPERSONIC_INFLOW_)) {
171  boundary[nb].FlowVelocity = (double*) calloc (solver->ndims,sizeof(double));
172  /* deallocated in BCCleanup.c */
173  /* read in the inflow density, velocity and pressure */
174  ferr = fscanf(in,"%lf",&boundary[nb].FlowDensity);
175  for (v = 0; v < solver->ndims; v++) ferr = fscanf(in,"%lf",&boundary[nb].FlowVelocity[v]);
176  ferr = fscanf(in,"%lf",&boundary[nb].FlowPressure);
177  ferr = fscanf(in,"%s" , boundary[nb].UnsteadyDirichletFilename);
178  }
179 
180  if ( (!strcmp(boundary[nb].bctype,_THERMAL_SLIP_WALL_))
181  || (!strcmp(boundary[nb].bctype,_THERMAL_NOSLIP_WALL_)) ){
182  boundary[nb].FlowVelocity = (double*) calloc (solver->ndims,sizeof(double));
183  /* deallocated in BCCleanup.c */
184  /* read the wall velocity */
185  for (v = 0; v < solver->ndims; v++) ferr = fscanf(in,"%lf",&boundary[nb].FlowVelocity[v]);
186  /* read in the filename where temperature data is available */
187  ferr = fscanf(in,"%s" , boundary[nb].UnsteadyTemperatureFilename);
188  }
189 
190  /* if boundary is periodic, let the MPI and HyPar know */
191  if (!strcmp(boundary[nb].bctype,_PERIODIC_)) {
192  solver->isPeriodic[boundary[nb].dim] = 1;
193  }
 194  /*
 195  The MPI function to exchange internal (MPI) boundary information will handle
 196  periodic boundaries ONLY IF the number of processes along that dimension is
 197  more than 1.
 198  */
199  if ((!strcmp(boundary[nb].bctype,_PERIODIC_)) && (mpi->iproc[boundary[nb].dim] > 1)) {
200  mpi->bcperiodic[boundary[nb].dim] = 1;
201  }
202 
203  /* some checks */
204  if (boundary[nb].dim >= solver->ndims) {
205  fprintf(stderr,"Error in reading boundary condition %d: dim %d is invalid (ndims = %d).\n",
206  nb,boundary[nb].dim,solver->ndims);
207  return(1);
208  }
209  printf(" Boundary %30s: Along dimension %2d and face %+1d\n",
210  boundary[nb].bctype,boundary[nb].dim,boundary[nb].face);
211  }
212 
213  fclose(in);
214  printf("%d boundary condition(s) read.\n",solver->nBoundaryZones);
215  }
216 
217  /* tell other processes how many BCs are there and let them allocate */
218  IERR MPIBroadcast_integer(&solver->nBoundaryZones,1,0,&mpi->world); CHECKERR(ierr);
219  if (mpi->rank) {
220  boundary = (DomainBoundary*) calloc (solver->nBoundaryZones,sizeof(DomainBoundary));
221  for (nb = 0; nb < solver->nBoundaryZones; nb++) {
222  boundary[nb].xmin = (double*) calloc (solver->ndims,sizeof(double)); /* deallocated in BCCleanup.c */
223  boundary[nb].xmax = (double*) calloc (solver->ndims,sizeof(double)); /* deallocated in BCCleanup.c */
224  boundary[nb].DirichletValue = boundary[nb].SpongeValue
225  = boundary[nb].FlowVelocity
226  = boundary[nb].UnsteadyDirichletData
227  = NULL;
228  boundary[nb].UnsteadyDirichletSize = NULL;
229  }
230  }
231 
232  /* communicate BC data to other processes */
233  for (nb = 0; nb < solver->nBoundaryZones; nb++) {
234  IERR MPIBroadcast_character(boundary[nb].bctype,_MAX_STRING_SIZE_,0,&mpi->world); CHECKERR(ierr);
235  IERR MPIBroadcast_integer (&boundary[nb].dim ,1 ,0,&mpi->world); CHECKERR(ierr);
236  IERR MPIBroadcast_integer (&boundary[nb].face ,1 ,0,&mpi->world); CHECKERR(ierr);
237  IERR MPIBroadcast_double (boundary[nb].xmin ,solver->ndims ,0,&mpi->world); CHECKERR(ierr);
238  IERR MPIBroadcast_double (boundary[nb].xmax ,solver->ndims ,0,&mpi->world); CHECKERR(ierr);
239  }
240  IERR MPIBroadcast_integer(solver->isPeriodic,solver->ndims,0,&mpi->world);CHECKERR(ierr);
241 
242  /* broadcast periodic boundary info for MPI to all processes */
243  IERR MPIBroadcast_integer(mpi->bcperiodic,solver->ndims,0,&mpi->world);CHECKERR(ierr);
244 
245  /* On other processes, if necessary, allocate and receive boundary-type-specific data */
246  for (nb = 0; nb < solver->nBoundaryZones; nb++) {
247  if (!strcmp(boundary[nb].bctype,_DIRICHLET_)) {
248  if (mpi->rank) boundary[nb].DirichletValue = (double*) calloc (solver->nvars,sizeof(double));
249  IERR MPIBroadcast_double(boundary[nb].DirichletValue,solver->nvars,0,&mpi->world); CHECKERR(ierr);
250  }
251 
252  if (!strcmp(boundary[nb].bctype,_SPONGE_)) {
253  if (mpi->rank) boundary[nb].SpongeValue = (double*) calloc (solver->nvars,sizeof(double));
254  IERR MPIBroadcast_double(boundary[nb].SpongeValue,solver->nvars,0,&mpi->world); CHECKERR(ierr);
255  }
256 
257  if ( (!strcmp(boundary[nb].bctype,_SLIP_WALL_))
258  || (!strcmp(boundary[nb].bctype,_NOSLIP_WALL_)) ) {
259  if (mpi->rank) boundary[nb].FlowVelocity = (double*) calloc (solver->ndims,sizeof(double));
260  IERR MPIBroadcast_double(boundary[nb].FlowVelocity,solver->ndims,0,&mpi->world); CHECKERR(ierr);
261  }
262 
263  if ( (!strcmp(boundary[nb].bctype,_SW_SLIP_WALL_))
264  || (!strcmp(boundary[nb].bctype,_SW_NOSLIP_WALL_)) ) {
265  if (mpi->rank) boundary[nb].FlowVelocity = (double*) calloc (solver->ndims,sizeof(double));
266  IERR MPIBroadcast_double(boundary[nb].FlowVelocity,solver->ndims,0,&mpi->world); CHECKERR(ierr);
267  }
268 
269  if (!strcmp(boundary[nb].bctype,_SUBSONIC_INFLOW_)) {
270  if (mpi->rank) boundary[nb].FlowVelocity = (double*) calloc (solver->ndims,sizeof(double));
271  IERR MPIBroadcast_double(&boundary[nb].FlowDensity,1 ,0,&mpi->world); CHECKERR(ierr);
272  IERR MPIBroadcast_double(boundary[nb].FlowVelocity,solver->ndims,0,&mpi->world); CHECKERR(ierr);
273  }
274 
275  if (!strcmp(boundary[nb].bctype,_SUBSONIC_OUTFLOW_)) {
276  IERR MPIBroadcast_double(&boundary[nb].FlowPressure,1,0,&mpi->world); CHECKERR(ierr);
277  }
278 
279  if (!strcmp(boundary[nb].bctype,_SUBSONIC_AMBIVALENT_)) {
280  if (mpi->rank) boundary[nb].FlowVelocity = (double*) calloc (solver->ndims,sizeof(double));
281  IERR MPIBroadcast_double(&boundary[nb].FlowDensity,1 ,0,&mpi->world); CHECKERR(ierr);
282  IERR MPIBroadcast_double(boundary[nb].FlowVelocity,solver->ndims,0,&mpi->world); CHECKERR(ierr);
283  IERR MPIBroadcast_double(&boundary[nb].FlowPressure,1,0,&mpi->world); CHECKERR(ierr);
284  }
285 
286  if (!strcmp(boundary[nb].bctype,_SUPERSONIC_INFLOW_)) {
287  if (mpi->rank) boundary[nb].FlowVelocity = (double*) calloc (solver->ndims,sizeof(double));
288  IERR MPIBroadcast_double(&boundary[nb].FlowDensity ,1 ,0,&mpi->world); CHECKERR(ierr);
289  IERR MPIBroadcast_double(boundary[nb].FlowVelocity ,solver->ndims,0,&mpi->world); CHECKERR(ierr);
290  IERR MPIBroadcast_double(&boundary[nb].FlowPressure,1 ,0,&mpi->world); CHECKERR(ierr);
291  }
292 
293  if (!strcmp(boundary[nb].bctype,_TURBULENT_SUPERSONIC_INFLOW_)) {
294  if (mpi->rank) boundary[nb].FlowVelocity = (double*) calloc (solver->ndims,sizeof(double));
295  IERR MPIBroadcast_double(&boundary[nb].FlowDensity ,1 ,0,&mpi->world); CHECKERR(ierr);
296  IERR MPIBroadcast_double(boundary[nb].FlowVelocity ,solver->ndims,0,&mpi->world); CHECKERR(ierr);
297  IERR MPIBroadcast_double(&boundary[nb].FlowPressure,1 ,0,&mpi->world); CHECKERR(ierr);
298  /* allocate arrays and read in unsteady boundary data */
299  IERR BCReadTurbulentInflowData(&boundary[nb],mpi,solver->ndims,solver->nvars,solver->dim_local); CHECKERR(ierr);
300  }
301 
302  if ( (!strcmp(boundary[nb].bctype,_THERMAL_SLIP_WALL_))
303  || (!strcmp(boundary[nb].bctype,_THERMAL_NOSLIP_WALL_)) ) {
304  if (mpi->rank) boundary[nb].FlowVelocity = (double*) calloc (solver->ndims,sizeof(double));
305  IERR MPIBroadcast_double(boundary[nb].FlowVelocity,solver->ndims,0,&mpi->world); CHECKERR(ierr);
306  /* allocate arrays and read in boundary temperature data */
307  IERR BCReadTemperatureData(&boundary[nb],mpi,solver->ndims,solver->nvars,solver->dim_local); CHECKERR(ierr);
308  }
309 
310  }
311 
312  solver->boundary = boundary;
313 
314  /* each process calculates its own part of these boundaries */
315  IERR CalculateLocalExtent(solver,mpi); CHECKERR(ierr);
316 
317 #if defined(HAVE_CUDA)
318  int bounds[GPU_MAX_NDIMS];
319  if (sim[0].solver.use_gpu) {
320  for (nb = 0; nb < solver->nBoundaryZones; nb++) {
321  _ArraySubtract1D_(bounds,boundary[nb].ie,boundary[nb].is,solver->ndims);
322 
323  _ArrayProduct1D_(bounds,solver->ndims,boundary[nb].gpu_npoints_bounds);
324  boundary[nb].gpu_npoints_local_wghosts = solver->npoints_local_wghosts;
325 
329  gpuMalloc((void**)&boundary[nb].gpu_is, solver->ndims*sizeof(int));
330  gpuMalloc((void**)&boundary[nb].gpu_ie, solver->ndims*sizeof(int));
331  gpuMalloc((void**)&boundary[nb].gpu_bounds, solver->ndims*sizeof(int));
332  gpuMemcpy(boundary[nb].gpu_is, boundary[nb].is, solver->ndims*sizeof(int), gpuMemcpyHostToDevice);
333  gpuMemcpy(boundary[nb].gpu_ie, boundary[nb].ie, solver->ndims*sizeof(int), gpuMemcpyHostToDevice);
334  gpuMemcpy(boundary[nb].gpu_bounds, bounds, solver->ndims*sizeof(int), gpuMemcpyHostToDevice);
335  if ( (!strcmp(boundary[nb].bctype,_SLIP_WALL_))
336  || (!strcmp(boundary[nb].bctype,_NOSLIP_WALL_)) ) {
337  gpuMalloc((void**)&boundary[nb].gpu_FlowVelocity, solver->ndims*sizeof(double));
338  gpuMemcpy( boundary[nb].gpu_FlowVelocity,
339  boundary[nb].FlowVelocity,
 340  solver->ndims*sizeof(double),
 341  gpuMemcpyHostToDevice);
 342  }
343  }
344  }
345 #endif
346 
347  /* initialize function pointers for each boundary condition */
348  for (nb = 0; nb < solver->nBoundaryZones; nb++) {
349 #if defined(HAVE_CUDA)
350  BCInitialize(&boundary[nb], solver->use_gpu);
351 #else
352  BCInitialize(&boundary[nb], 0);
353 #endif
354  }
355 
356  }
357 
358  return 0;
359 }
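
For boundary types that carry additional data, the values are read from the boundary input file (boundary.inp) immediately after the rest of that boundary's specification, in the order shown in the listing above. The following is a hedged illustration for a flow problem with ndims = 2 and nvars = 4: the dimension, face, and extent entries are elided (shown as ...), the numbers are purely illustrative, and the keyword spellings are assumed to match what the _DIRICHLET_, _SLIP_WALL_, _SUBSONIC_INFLOW_, and _SUBSONIC_OUTFLOW_ macros expand to.

    dirichlet         ...   1.0 0.0 0.0 2.5      (nvars Dirichlet values)
    slip-wall         ...   0.0 0.0              (ndims wall-velocity components)
    subsonic-inflow   ...   1.0  0.3 0.0         (density, then ndims velocity components)
    subsonic-outflow  ...   0.714                (outflow pressure)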

◆ InitializeImmersedBoundaries()

int InitializeImmersedBoundaries ( void *  s,
int  nsims 
)

Initialize the immersed boundary conditions

Initialize the immersed boundaries, if present.

  • Read in immersed body from STL file.
  • Allocate and set up ImmersedBoundary object.
  • Identify blanked-out grid points based on immersed body geometry.
  • Identify and make a list of immersed boundary points on each rank.
  • For each immersed boundary point, find the "nearest" facet.
Parameters
s        Array of simulation objects of type SimulationObject
nsims    Number of simulation objects

Definition at line 22 of file InitializeImmersedBoundaries.c.

25 {
26  SimulationObject* simobj = (SimulationObject*) s;
27  int n;
28 
29  for (n = 0; n < nsims; n++) {
30 
31  HyPar *solver = &(simobj[n].solver);
32  MPIVariables *mpi = &(simobj[n].mpi);
33 
34  ImmersedBoundary *ib = NULL;
35  Body3D *body = NULL;
36 
37  int stat, d, ndims = solver->ndims;
38 
39  if ((!solver->flag_ib) || (ndims != _IB_NDIMS_)) {
40  solver->ib = NULL;
41  continue;
42  }
43 
44  /* Read in immersed body from file */
45  IBReadBodySTL(&body,solver->ib_filename,mpi,&stat);
46  if (stat) {
47  if (!mpi->rank) {
48  fprintf(stderr,"Error in InitializeImmersedBoundaries(): Unable to ");
49  fprintf(stderr,"read immersed body from file %s.\n",solver->ib_filename);
50  }
51  solver->flag_ib = 0;
52  solver->ib = NULL;
53  return(1);
54  }
 55  IBComputeBoundingBox(body);
 56 
57  /* allocate immersed boundary object and set it up */
58  ib = (ImmersedBoundary*) calloc (1, sizeof(ImmersedBoundary));
59  ib->tolerance = 1e-12;
60  ib->delta = 1e-6;
61  ib->itr_max = 500;
62  ib->body = body;
63  solver->ib = ib;
64 
65  int offset_global, offset_local,
66  *dim_local = solver->dim_local,
67  *dim_global = solver->dim_global,
68  ghosts = solver->ghosts,
69  size = dim_global[0] + dim_global[1] + dim_global[2],
70  count = 0;
71  double *Xg = (double*) calloc(size,sizeof(double));
72 
73  /* assemble the global grid on rank 0 */
74  offset_global = offset_local = 0;
75  for (d=0; d<ndims; d++) {
76  IERR MPIGatherArray1D(mpi,(mpi->rank?NULL:&Xg[offset_global]),
77  &solver->x[offset_local+ghosts],
78  mpi->is[d],mpi->ie[d],dim_local[d],0); CHECKERR(ierr);
79  offset_global += dim_global[d];
80  offset_local += dim_local [d] + 2*ghosts;
81  }
82  /* send the global grid to other ranks */
83  MPIBroadcast_double(Xg,size,0,&mpi->world);
84 
85  /* identify whether this is a 3D or "pseudo-2D" simulation */
86  IBIdentifyMode(Xg,dim_global,solver->ib);
87 
88  /* identify grid points inside the immersed body */
89  int count_inside_body = 0;
90  count = IBIdentifyBody(solver->ib,dim_global,dim_local,ghosts,mpi,Xg,solver->iblank);
91  MPISum_integer(&count_inside_body,&count,1,&mpi->world);
92  free(Xg);
93 
94  /* At ghost points corresponding to the physical boundary, extrapolate from the interior
95  (this should also work for bodies that are adjacent to physical boundaries). At interior
96  (MPI) boundaries, exchange iblank across MPI ranks.
97  */
98  int indexb[ndims], indexi[ndims], bounds[ndims], offset[ndims];
99  for (d = 0; d < ndims; d++) {
100  /* left boundary */
101  if (!mpi->ip[d]) {
102  _ArrayCopy1D_(dim_local,bounds,ndims); bounds[d] = ghosts;
103  _ArraySetValue_(offset,ndims,0); offset[d] = -ghosts;
104  int done = 0; _ArraySetValue_(indexb,ndims,0);
105  while (!done) {
106  _ArrayCopy1D_(indexb,indexi,ndims); indexi[d] = ghosts-1-indexb[d];
107  int p1; _ArrayIndex1DWO_(ndims,dim_local,indexb,offset,ghosts,p1);
108  int p2; _ArrayIndex1D_ (ndims,dim_local,indexi,ghosts,p2);
109  solver->iblank[p1] = solver->iblank[p2];
110  _ArrayIncrementIndex_(ndims,bounds,indexb,done);
111  }
112  }
113  /* right boundary */
114  if (mpi->ip[d] == mpi->iproc[d]-1) {
115  _ArrayCopy1D_(dim_local,bounds,ndims); bounds[d] = ghosts;
116  _ArraySetValue_(offset,ndims,0); offset[d] = dim_local[d];
117  int done = 0; _ArraySetValue_(indexb,ndims,0);
118  while (!done) {
119  _ArrayCopy1D_(indexb,indexi,ndims); indexi[d] = dim_local[d]-1-indexb[d];
120  int p1; _ArrayIndex1DWO_(ndims,dim_local,indexb,offset,ghosts,p1);
121  int p2; _ArrayIndex1D_ (ndims,dim_local,indexi,ghosts,p2);
122  solver->iblank[p1] = solver->iblank[p2];
123  _ArrayIncrementIndex_(ndims,bounds,indexb,done);
124  }
125  }
126  }
127  MPIExchangeBoundariesnD(ndims,1,dim_local,ghosts,mpi,solver->iblank);
128 
129  /* identify and create a list of immersed boundary points on each rank */
130  int count_boundary_points = 0;
131  count = IBIdentifyBoundary(solver->ib,mpi,dim_local,ghosts,solver->iblank);
132  MPISum_integer(&count_boundary_points,&count,1,&mpi->world);
133 
134  /* find the nearest facet for each immersed boundary point */
135  double ld = 0, xmin, xmax, ymin, ymax, zmin, zmax;
136  _GetCoordinate_(0,0 ,dim_local,ghosts,solver->x,xmin);
137  _GetCoordinate_(0,dim_local[0]-1,dim_local,ghosts,solver->x,xmax);
138  _GetCoordinate_(1,0 ,dim_local,ghosts,solver->x,ymin);
139  _GetCoordinate_(1,dim_local[1]-1,dim_local,ghosts,solver->x,ymax);
140  _GetCoordinate_(2,0 ,dim_local,ghosts,solver->x,zmin);
141  _GetCoordinate_(2,dim_local[2]-1,dim_local,ghosts,solver->x,zmax);
142  double xlen = xmax - xmin;
143  double ylen = ymax - ymin;
144  double zlen = zmax - zmin;
145  ld = max3(xlen,ylen,zlen);
146  count = IBNearestFacetNormal(solver->ib,mpi,solver->x,ld,dim_local,ghosts);
147  if (count) {
148  fprintf(stderr, "Error in InitializeImmersedBoundaries():\n");
149  fprintf(stderr, " IBNearestFacetNormal() returned with error code %d on rank %d.\n",
150  count, mpi->rank);
151  return(count);
152  }
153 
154  /* For the immersed boundary points, find the interior points for extrapolation,
155  and compute their interpolation coefficients */
156  count = IBInterpCoeffs(solver->ib,mpi,solver->x,dim_local,ghosts,solver->iblank);
157  if (count) {
158  fprintf(stderr, "Error in InitializeImmersedBoundaries():\n");
159  fprintf(stderr, " IBInterpCoeffs() returned with error code %d on rank %d.\n",
160  count, mpi->rank);
161  return(count);
162  }
163 
164  /* Create facet mapping */;
165  count = IBCreateFacetMapping(ib,mpi,solver->x,dim_local,ghosts);
166  if (count) {
167  fprintf(stderr, "Error in InitializeImmersedBoundaries():\n");
168  fprintf(stderr, " IBCreateFacetMapping() returned with error code %d on rank %d.\n",
169  count, mpi->rank);
170  return(count);
171  }
172 
173  /* Done */
174  if (!mpi->rank) {
175  double percentage;
176  printf("Immersed body read from %s:\n",solver->ib_filename);
177  if (nsims > 1) printf("For domain %d,\n", n);
178  printf(" Number of facets: %d\n Bounding box: [%3.1f,%3.1lf] X [%3.1f,%3.1lf] X [%3.1f,%3.1lf]\n",
179  body->nfacets,body->xmin,body->xmax,body->ymin,body->ymax,body->zmin,body->zmax);
180  percentage = ((double)count_inside_body)/((double)solver->npoints_global)*100.0;
181  printf(" Number of grid points inside immersed body: %d (%4.1f%%).\n",count_inside_body,percentage);
182  percentage = ((double)count_boundary_points)/((double)solver->npoints_global)*100.0;
183  printf(" Number of immersed boundary points : %d (%4.1f%%).\n",count_boundary_points,percentage);
184  printf(" Immersed body simulation mode : %s.\n", ib->mode);
185  }
186 
187  }
188 
189  return(0);
190 }
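
The extrapolation of iblank into the ghost points at physical boundaries (lines 99 through 126 above) is a mirror copy: the ghost point k cells outside the domain receives the value of the interior point k-1 cells inside it. Below is a minimal, self-contained 1D sketch of that copy; the array layout, sizes, and values are illustrative and not part of HyPar.

    #include <stdio.h>

    #define GHOSTS 2   /* number of ghost points per side */
    #define N      6   /* number of interior points       */

    int main(void)
    {
      double iblank[N + 2*GHOSTS];
      int i, k;

      /* interior points are fluid (iblank = 1); ghost points start unset (0) */
      for (i = 0; i < N + 2*GHOSTS; i++) iblank[i] = 0.0;
      for (i = GHOSTS; i < N + GHOSTS; i++) iblank[i] = 1.0;
      iblank[GHOSTS] = 0.0;  /* pretend the first interior point lies inside the body */

      /* left boundary: the ghost point k cells outside the domain gets the value
         of the interior point k-1 cells inside it, i.e. a mirror copy about the
         boundary face (the relation indexi[d] = ghosts-1-indexb[d] in the listing) */
      for (k = 1; k <= GHOSTS; k++) iblank[GHOSTS-k] = iblank[GHOSTS+(k-1)];

      /* right boundary: mirror copy about the other face
         (the relation indexi[d] = dim_local[d]-1-indexb[d] in the listing) */
      for (k = 1; k <= GHOSTS; k++) iblank[GHOSTS+N-1+k] = iblank[GHOSTS+N-k];

      for (i = 0; i < N + 2*GHOSTS; i++) printf("%g ", iblank[i]);
      printf("\n");   /* prints: 1 0 0 1 1 1 1 1 1 1 */
      return 0;
    }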

◆ InitializePhysics()

int InitializePhysics ( void *  s,
int  nsims 
)

Initialize the physics

Initialize the physical model for a simulation: depending on the physical model specified, this function calls that model's initialization function, which is responsible for setting all the physics-specific functions required by the model.

Parameters
s        Array of simulation objects of type SimulationObject
nsims    Number of simulation objects

Definition at line 38 of file InitializePhysics.c.

41 {
42  SimulationObject* sim = (SimulationObject*) s;
43  int ns;
44  _DECLARE_IERR_;
45 
46  if (nsims == 0) return 0;
47 
48  if (!sim[0].mpi.rank) {
49  printf("Initializing physics. Model = \"%s\"\n",sim[0].solver.model);
50  }
51 
52  for (ns = 0; ns < nsims; ns++) {
53 
54  HyPar *solver = &(sim[ns].solver);
55  MPIVariables *mpi = &(sim[ns].mpi);
56 
57  /* Initialize physics-specific functions to NULL */
58  solver->ComputeCFL = NULL;
59  solver->ComputeDiffNumber = NULL;
60  solver->FFunction = NULL;
61  solver->dFFunction = NULL;
62  solver->FdFFunction = NULL;
63  solver->GFunction = NULL;
64  solver->HFunction = NULL;
65  solver->SFunction = NULL;
66  solver->UFunction = NULL;
67  solver->JFunction = NULL;
68  solver->KFunction = NULL;
69  solver->Upwind = NULL;
70  solver->UpwinddF = NULL;
71  solver->UpwindFdF = NULL;
72  solver->PreStage = NULL;
73  solver->PostStage = NULL;
74  solver->PreStep = NULL;
75  solver->PostStep = NULL;
76  solver->PrintStep = NULL;
77  solver->PhysicsOutput = NULL;
78  solver->PhysicsInput = NULL;
79  solver->AveragingFunction = NULL;
80  solver->GetLeftEigenvectors = NULL;
81  solver->GetRightEigenvectors = NULL;
82  solver->IBFunction = NULL;
83 
84  if (!strcmp(solver->model,_LINEAR_ADVECTION_DIFFUSION_REACTION_)) {
85 
86  solver->physics = (LinearADR*) calloc (1,sizeof(LinearADR));
87  IERR LinearADRInitialize(solver,mpi); CHECKERR(ierr);
88 
89  } else if (!strcmp(solver->model,_BURGERS_)) {
90 
91  solver->physics = (Burgers*) calloc (1,sizeof(Burgers));
92  IERR BurgersInitialize(solver,mpi); CHECKERR(ierr);
93 
94  } else if (!strcmp(solver->model,_FP_DOUBLE_WELL_)) {
95 
96  solver->physics = (FPDoubleWell*) calloc (1,sizeof(FPDoubleWell));
97  IERR FPDoubleWellInitialize(solver,mpi); CHECKERR(ierr);
98 
99  } else if (!strcmp(solver->model,_FP_POWER_SYSTEM_)) {
100 
101  solver->physics = (FPPowerSystem*) calloc (1,sizeof(FPPowerSystem));
102  IERR FPPowerSystemInitialize(solver,mpi); CHECKERR(ierr);
103 
104  } else if (!strcmp(solver->model,_FP_POWER_SYSTEM_1BUS_)) {
105 
106  solver->physics = (FPPowerSystem1Bus*) calloc (1,sizeof(FPPowerSystem1Bus));
107  IERR FPPowerSystem1BusInitialize(solver,mpi); CHECKERR(ierr);
108 
109  } else if (!strcmp(solver->model,_FP_POWER_SYSTEM_3BUS_)) {
110 
111  solver->physics = (FPPowerSystem3Bus*) calloc (1,sizeof(FPPowerSystem3Bus));
112  IERR FPPowerSystem3BusInitialize(solver,mpi); CHECKERR(ierr);
113 
114  } else if (!strcmp(solver->model,_EULER_1D_)) {
115 
116  solver->physics = (Euler1D*) calloc (1,sizeof(Euler1D));
117  IERR Euler1DInitialize(solver,mpi); CHECKERR(ierr);
118 
119  } else if (!strcmp(solver->model,_EULER_2D_)) {
120 
121  solver->physics = (Euler2D*) calloc (1,sizeof(Euler2D));
122  IERR Euler2DInitialize(solver,mpi); CHECKERR(ierr);
123 
124  } else if (!strcmp(solver->model,_NAVIER_STOKES_2D_)) {
125 
126  solver->physics = (NavierStokes2D*) calloc (1,sizeof(NavierStokes2D));
127  IERR NavierStokes2DInitialize(solver,mpi); CHECKERR(ierr);
128 
129  } else if (!strcmp(solver->model,_NAVIER_STOKES_3D_)) {
130 
131  solver->physics = (NavierStokes3D*) calloc (1,sizeof(NavierStokes3D));
132  IERR NavierStokes3DInitialize(solver,mpi); CHECKERR(ierr);
133 
134  } else if (!strcmp(solver->model,_NUMA2D_)) {
135 
136  solver->physics = (Numa2D*) calloc (1,sizeof(Numa2D));
137  IERR Numa2DInitialize(solver,mpi); CHECKERR(ierr);
138 
139  } else if (!strcmp(solver->model,_NUMA3D_)) {
140 
141  solver->physics = (Numa3D*) calloc (1,sizeof(Numa3D));
142  IERR Numa3DInitialize(solver,mpi); CHECKERR(ierr);
143 
144  } else if (!strcmp(solver->model,_SHALLOW_WATER_1D_)) {
145 
146  solver->physics = (ShallowWater1D*) calloc (1,sizeof(ShallowWater1D));
147  IERR ShallowWater1DInitialize(solver,mpi); CHECKERR(ierr);
148 
149  } else if (!strcmp(solver->model,_SHALLOW_WATER_2D_)) {
150 
151  solver->physics = (ShallowWater2D*) calloc (1,sizeof(ShallowWater2D));
152  IERR ShallowWater2DInitialize(solver,mpi); CHECKERR(ierr);
153 
154  } else if (!strcmp(solver->model,_VLASOV_)) {
155 
156  solver->physics = (Vlasov*) calloc (1,sizeof(Vlasov));
157  IERR VlasovInitialize(solver,mpi); CHECKERR(ierr);
158 
159  }else {
160 
161  fprintf(stderr,"Error (domain %d): %s is not a supported physical model.\n",
162  ns, solver->model);
163  return(1);
164 
165  }
166 
167  /* some checks */
168  if ( ( (solver->GetLeftEigenvectors == NULL) || (solver->GetRightEigenvectors == NULL) )
169  && (!strcmp(solver->interp_type,_CHARACTERISTIC_)) && (solver->nvars > 1) ) {
170  if (!mpi->rank) {
171  fprintf(stderr,"Error (domain %d): Interpolation type is defined as characteristic ", ns);
172  fprintf(stderr,"but physics initializations returned NULL pointers for ");
173  fprintf(stderr,"Get(Left,Right)Eigenvectors needed for characteristic-based ");
174  fprintf(stderr,"reconstruction.\n");
175  }
176  return(1);
177  }
178 
179  if (!strcmp(solver->SplitHyperbolicFlux,"yes")) {
180  if ((!solver->dFFunction) || (!solver->UpwinddF)) {
181  if (!mpi->rank) {
182  fprintf(stderr,"Error (domain %d): Splitting of hyperbolic flux requires a dFFunction ", ns);
183  fprintf(stderr,"and its upwinding function UpwinddF.\n");
184  fprintf(stderr,"Error: f(u) = [f(u) - df(u)] + df(u).\n");
185  fprintf(stderr,"Error: dFFunction or UpwinddF (or both) is (are) NULL.\n");
186  }
187  return(1);
188  }
189  if (solver->FdFFunction && solver->UpwindFdF) solver->flag_fdf_specified = 1;
190  else solver->flag_fdf_specified = 0;
191  }
192 
193  if ((solver->IBFunction == NULL) && (solver->flag_ib)) {
194  if (!mpi->rank) {
195  fprintf(stderr,"Error in InitializePhysics() (domain %d): Physical model %s does not yet have an immersed boundary treatment.\n",
196  ns, solver->model);
197  }
198  return(1);
199  }
200 
201  }
202 
203  return(0);
204 }
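
Adding support for a new physics model follows the same pattern as the cases above: allocate the model's parameter structure and call its initialization routine, which must set the physics-specific function pointers (HyPar::ComputeCFL, HyPar::FFunction, HyPar::Upwind, and so on) that are reset to NULL at the top of the loop; otherwise the checks at the end of the loop can fail for certain solver options. The fragment below is a hedged sketch only; _MY_MODEL_, MyModel, and MyModelInitialize() are illustrative placeholders, not part of HyPar.

    } else if (!strcmp(solver->model,_MY_MODEL_)) {

      /* allocate the (hypothetical) model's parameter structure and initialize it */
      solver->physics = (MyModel*) calloc (1,sizeof(MyModel));
      IERR MyModelInitialize(solver,mpi); CHECKERR(ierr);

    }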

◆ InitializePhysicsData()

int InitializePhysicsData ( void *  s,
int  idx,
int  nsims,
int *  dim_data 
)

Initialize the physics data

For each simulation object, call the physics-specific function to read in any physics data that is not a part of the solution vector.

Parameters
s           Simulation object of type SimulationObject
idx         Index of this simulation object
nsims       Total number of simulations
dim_data    Dimensions of physics-specific data

Definition at line 12 of file InitializePhysicsData.c.

17 {
18  SimulationObject* sim = (SimulationObject*) s;
19  HyPar *solver = &(sim->solver);
20  MPIVariables *mpi = &(sim->mpi);
21 
22  if (solver->PhysicsInput) {
23  int ierr = solver->PhysicsInput(solver, mpi, idx, nsims, dim_data);
24  if (ierr) {
25  fprintf(stderr, "Error in InitializePhysicsData():\n");
26  fprintf(stderr, " solver->PhysicsInput() returned error %d on rank %d\n",
27  ierr, mpi->rank);
28  return ierr;
29  }
30  }
31 
32  return 0;
33 }
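
The HyPar::PhysicsInput pointer invoked here is optional: it remains NULL unless the model's initialization routine registers a reader for auxiliary physics data. A hedged one-line sketch of such a registration is shown below; MyModelReadData is an illustrative name whose signature matches the call in the listing above (solver, MPI object, simulation index, number of simulations, data dimensions).

    /* inside the (hypothetical) model's initialization routine */
    solver->PhysicsInput = MyModelReadData;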

◆ InitializeSolvers()

int InitializeSolvers ( void *  s,
int  nsims 
)

Initialize the solvers

This function initializes all the solver-specific function pointers based on user input. The specific functions used for spatial discretization, time integration, and solution output are set here.
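
For example, the following solver.inp fragment would select the TimeRK() time integrator, the characteristic-based fifth-order WENO reconstruction for the hyperbolic term, and the two-stage nonconservative discretization with fourth-order central derivatives for the parabolic term. This is a hedged sketch: the keyword values are assumed to match the strings that the _RK_, _FIFTH_ORDER_WENO_, _CHARACTERISTIC_, _NC_2STAGE_, and _FOURTH_ORDER_CENTRAL_ macros expand to, and the time_scheme_type value naming a particular Runge-Kutta tableau is likewise illustrative.

    begin
        ...
        time_scheme        rk
        time_scheme_type   44
        hyp_space_scheme   weno5
        hyp_interp_type    characteristic
        par_space_type     nonconservative-2stage
        par_space_scheme   4
        ...
    end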

Parameters
s        Array of simulation objects of type SimulationObject
nsims    Number of simulation objects

Definition at line 52 of file InitializeSolvers.c.

55 {
56  SimulationObject* sim = (SimulationObject*) s;
57  int ns;
58  _DECLARE_IERR_;
59 
60  if (nsims == 0) return 0;
61 
62  if (!sim[0].mpi.rank) {
63  printf("Initializing solvers.\n");
64  }
65 
66  for (ns = 0; ns < nsims; ns++) {
67 
68  HyPar *solver = &(sim[ns].solver);
69  MPIVariables *mpi = &(sim[ns].mpi);
70 
74 #if defined(HAVE_CUDA)
75  if (solver->use_gpu) {
77  } else {
78 #endif
80 #if defined(HAVE_CUDA)
81  }
82 #endif
87 
88  /* choose the type of parabolic discretization */
89  solver->ParabolicFunction = NULL;
90  solver->SecondDerivativePar = NULL;
91  solver->FirstDerivativePar = NULL;
92  solver->InterpolateInterfacesPar = NULL;
93 
94 #if defined(HAVE_CUDA)
95  if (solver->use_gpu) {
96 
97  if (!strcmp(solver->spatial_type_par,_NC_2STAGE_)) {
98 
100 
101  if (!strcmp(solver->spatial_scheme_par,_FOURTH_ORDER_CENTRAL_)) {
103  } else {
104  fprintf(stderr,"ERROR (domain %d): scheme %s is not supported on GPU!",
105  ns, solver->spatial_scheme_par);
106  return 1;
107  }
108 
109  }
110 
111  } else {
112 #endif
113 
114  if (!strcmp(solver->spatial_type_par,_NC_1STAGE_)) {
115 
117  if (!strcmp(solver->spatial_scheme_par,_SECOND_ORDER_CENTRAL_)) {
119  } else if (!strcmp(solver->spatial_scheme_par,_FOURTH_ORDER_CENTRAL_)) {
121  } else {
122  fprintf(stderr,"Error (domain %d): %s is not a supported ",
123  ns, solver->spatial_scheme_par);
124  fprintf(stderr,"spatial scheme of type %s for the parabolic terms.\n",
125  solver->spatial_type_par);
126  }
127 
128  } else if (!strcmp(solver->spatial_type_par,_NC_2STAGE_)) {
129 
131  if (!strcmp(solver->spatial_scheme_par,_SECOND_ORDER_CENTRAL_)) {
133  /* why first order? see ParabolicFunctionNC2Stage.c. 2nd order central
134  approximation to the 2nd derivative can be expressed as a conjugation
135  of 1st order approximations to the 1st derivative (one forward and
136  one backward) -- this prevents odd-even decoupling */
137  } else if (!strcmp(solver->spatial_scheme_par,_FOURTH_ORDER_CENTRAL_)) {
139  /* why 4th order? I could not derive the decomposition of the
140  4th order central approximation to the 2nd derivative! Some problems
141  may show odd-even decoupling */
142  } else {
143  fprintf(stderr,"Error (domain %d): %s is not a supported ",
144  ns, solver->spatial_scheme_par);
145  fprintf(stderr,"spatial scheme of type %s for the parabolic terms.\n",
146  solver->spatial_type_par);
147  }
148 
149  } else if (!strcmp(solver->spatial_type_par,_NC_1_5STAGE_)) {
150 
152  if (!strcmp(solver->spatial_scheme_par,_SECOND_ORDER_CENTRAL_)) {
155  } else if (!strcmp(solver->spatial_scheme_par,_FOURTH_ORDER_CENTRAL_)) {
158  } else {
159  fprintf(stderr,"Error (domain %d): %s is not a supported ",
160  ns, solver->spatial_scheme_par);
161  fprintf(stderr,"spatial scheme of type %s for the parabolic terms.\n",
162  solver->spatial_type_par);
163  }
164 
165  } else if (!strcmp(solver->spatial_type_par,_CONS_1STAGE_)) {
166 
168  if (!strcmp(solver->spatial_scheme_par,_SECOND_ORDER_CENTRAL_)) {
170  } else {
171  fprintf(stderr,"Error (domain %d): %s is not a supported ",
172  ns, solver->spatial_scheme_par);
173  fprintf(stderr,"spatial scheme of type %s for the parabolic terms.\n",
174  solver->spatial_type_par);
175  }
176 
177  } else {
178 
179  fprintf(stderr,"Error (domain %d): %s is not a supported ",
180  ns, solver->spatial_type_par);
181  fprintf(stderr,"spatial discretization type for the parabolic terms.\n");
182  return(1);
183 
184  }
185 
186 #if defined(HAVE_CUDA)
187  }
188 #endif
189 
190  /* Spatial interpolation for hyperbolic term */
191  solver->interp = NULL;
192  solver->compact = NULL;
193  solver->lusolver = NULL;
194  solver->SetInterpLimiterVar = NULL;
195  solver->flag_nonlinearinterp = 1;
196  if (strcmp(solver->interp_type,_CHARACTERISTIC_) && strcmp(solver->interp_type,_COMPONENTS_)) {
197  fprintf(stderr,"Error in InitializeSolvers() (domain %d): %s is not a ",
198  ns, solver->interp_type);
199  fprintf(stderr,"supported interpolation type.\n");
200  return(1);
201  }
202 
203 #if defined(HAVE_CUDA)
204  if (solver->use_gpu) {
205 
206  if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_WENO_)) {
207 
208  /* Fifth order WENO scheme */
209  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
210  fprintf(stderr,
211  "Error (domain %d): characteristic-based WENO5 is not yet implemented on GPUs.\n",
212  ns );
213  return 1;
214  } else {
216  }
217  solver->interp = (WENOParameters*) calloc(1,sizeof(WENOParameters));
218  IERR WENOInitialize(solver,mpi,solver->spatial_scheme_hyp,solver->interp_type); CHECKERR(ierr);
219  solver->flag_nonlinearinterp = !(((WENOParameters*)solver->interp)->no_limiting);
220 
221  } else {
222 
223  fprintf(stderr,
224  "Error (domain %d): %s is a not a supported spatial interpolation scheme on GPUs.\n",
225  ns, solver->spatial_scheme_hyp);
226  return 1;
227  }
228 
229  } else {
230 #endif
231 
232  if (!strcmp(solver->spatial_scheme_hyp,_FIRST_ORDER_UPWIND_)) {
233 
234  /* First order upwind scheme */
235  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
237  } else {
239  }
240 
241  } else if (!strcmp(solver->spatial_scheme_hyp,_SECOND_ORDER_CENTRAL_)) {
242 
243  /* Second order central scheme */
244  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
246  } else {
248  }
249 
250  } else if (!strcmp(solver->spatial_scheme_hyp,_SECOND_ORDER_MUSCL_)) {
251 
252  /* Second order MUSCL scheme */
253  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
255  } else {
257  }
258  solver->interp = (MUSCLParameters*) calloc(1,sizeof(MUSCLParameters));
259  IERR MUSCLInitialize(solver,mpi); CHECKERR(ierr);
260 
261  } else if (!strcmp(solver->spatial_scheme_hyp,_THIRD_ORDER_MUSCL_)) {
262 
263  /* Third order MUSCL scheme */
264  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
266  } else {
268  }
269  solver->interp = (MUSCLParameters*) calloc(1,sizeof(MUSCLParameters));
270  IERR MUSCLInitialize(solver,mpi); CHECKERR(ierr);
271 
272  } else if (!strcmp(solver->spatial_scheme_hyp,_FOURTH_ORDER_CENTRAL_)) {
273 
274  /* Fourth order central scheme */
275  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
277  } else {
279  }
280 
281  } else if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_UPWIND_)) {
282 
283  /* Fifth order upwind scheme */
284  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
286  } else {
288  }
289 
290  } else if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_COMPACT_UPWIND_)) {
291 
292  /* Fifth order compact upwind scheme */
293  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
295  } else {
297  }
298  solver->compact = (CompactScheme*) calloc(1,sizeof(CompactScheme));
299  IERR CompactSchemeInitialize(solver,mpi,solver->interp_type);
300  solver->lusolver = (TridiagLU*) calloc (1,sizeof(TridiagLU));
301  IERR tridiagLUInit(solver->lusolver,&mpi->world);CHECKERR(ierr);
302 
303  } else if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_WENO_)) {
304 
305  /* Fifth order WENO scheme */
306  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
308  } else {
310  }
311  solver->interp = (WENOParameters*) calloc(1,sizeof(WENOParameters));
312  IERR WENOInitialize(solver,mpi,solver->spatial_scheme_hyp,solver->interp_type); CHECKERR(ierr);
313  solver->flag_nonlinearinterp = !(((WENOParameters*)solver->interp)->no_limiting);
314 
315  } else if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_CRWENO_)) {
316 
317  /* Fifth order CRWENO scheme */
318  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
320  } else {
322  }
323  solver->interp = (WENOParameters*) calloc(1,sizeof(WENOParameters));
324  IERR WENOInitialize(solver,mpi,solver->spatial_scheme_hyp,solver->interp_type); CHECKERR(ierr);
325  solver->flag_nonlinearinterp = !(((WENOParameters*)solver->interp)->no_limiting);
326  solver->compact = (CompactScheme*) calloc(1,sizeof(CompactScheme));
327  IERR CompactSchemeInitialize(solver,mpi,solver->interp_type);
328  solver->lusolver = (TridiagLU*) calloc (1,sizeof(TridiagLU));
329  IERR tridiagLUInit(solver->lusolver,&mpi->world);CHECKERR(ierr);
330 
331  } else if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_HCWENO_)) {
332 
333  /* Fifth order HCWENO scheme */
334  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
336  } else {
338  }
339  solver->interp = (WENOParameters*) calloc(1,sizeof(WENOParameters));
340  IERR WENOInitialize(solver,mpi,solver->spatial_scheme_hyp,solver->interp_type); CHECKERR(ierr);
341  solver->flag_nonlinearinterp = !(((WENOParameters*)solver->interp)->no_limiting);
342  solver->compact = (CompactScheme*) calloc(1,sizeof(CompactScheme));
343  IERR CompactSchemeInitialize(solver,mpi,solver->interp_type);
344  solver->lusolver = (TridiagLU*) calloc (1,sizeof(TridiagLU));
345  IERR tridiagLUInit(solver->lusolver,&mpi->world);CHECKERR(ierr);
346 
347  } else {
348 
349  fprintf(stderr,"Error (domain %d): %s is a not a supported spatial interpolation scheme.\n",
350  ns, solver->spatial_scheme_hyp);
351  return(1);
352  }
353 
354 #if defined(HAVE_CUDA)
355  }
356 #endif
357 
358  /* Time integration */
359  solver->time_integrator = NULL;
360 #ifdef with_petsc
361  if (solver->use_petscTS) {
362  /* dummy -- not used */
364  solver->msti = NULL;
365  } else {
366  if (!strcmp(solver->time_scheme,_FORWARD_EULER_)) {
368  solver->msti = NULL;
369  } else if (!strcmp(solver->time_scheme,_RK_)) {
370  solver->TimeIntegrate = TimeRK;
371  solver->msti = (ExplicitRKParameters*) calloc (1,sizeof(ExplicitRKParameters));
373  solver->msti,mpi); CHECKERR(ierr);
374  } else if (!strcmp(solver->time_scheme,_GLM_GEE_)) {
375  solver->TimeIntegrate = TimeGLMGEE;
376  solver->msti = (GLMGEEParameters*) calloc (1,sizeof(GLMGEEParameters));
378  solver->msti,mpi); CHECKERR(ierr);
379  } else {
380  fprintf(stderr,"Error (domain %d): %s is a not a supported time-integration scheme.\n",
381  ns, solver->time_scheme);
382  return(1);
383  }
384  }
385 #else
386  if (!strcmp(solver->time_scheme,_FORWARD_EULER_)) {
388  solver->msti = NULL;
389  } else if (!strcmp(solver->time_scheme,_RK_)) {
390  solver->TimeIntegrate = TimeRK;
391  solver->msti = (ExplicitRKParameters*) calloc (1,sizeof(ExplicitRKParameters));
393  solver->msti,mpi); CHECKERR(ierr);
394  } else if (!strcmp(solver->time_scheme,_GLM_GEE_)) {
395  solver->TimeIntegrate = TimeGLMGEE;
396  solver->msti = (GLMGEEParameters*) calloc (1,sizeof(GLMGEEParameters));
398  solver->msti,mpi); CHECKERR(ierr);
399  } else {
400  fprintf(stderr,"Error (domain %d): %s is a not a supported time-integration scheme.\n",
401  ns, solver->time_scheme);
402  return(1);
403  }
404 #endif
405 
406  /* Solution output function */
407  solver->WriteOutput = NULL; /* default - no output */
408  solver->filename_index = NULL;
409  strcpy(solver->op_fname_root, "op");
410 #ifdef with_librom
411  strcpy(solver->op_rom_fname_root, "op_rom");
412 #endif
413  strcpy(solver->aux_op_fname_root, "ts0");
414  if (!strcmp(solver->output_mode,"serial")) {
415  solver->index_length = 5;
416  solver->filename_index = (char*) calloc (solver->index_length+1,sizeof(char));
417  int i; for (i=0; i<solver->index_length; i++) solver->filename_index[i] = '0';
418  solver->filename_index[solver->index_length] = (char) 0;
419  if (!strcmp(solver->op_file_format,"text")) {
420  solver->WriteOutput = WriteText;
421  strcpy(solver->solnfilename_extn,".dat");
422  } else if (!strcmp(solver->op_file_format,"tecplot2d")) {
423  solver->WriteOutput = WriteTecplot2D;
424  strcpy(solver->solnfilename_extn,".dat");
425  } else if (!strcmp(solver->op_file_format,"tecplot3d")) {
426  solver->WriteOutput = WriteTecplot3D;
427  strcpy(solver->solnfilename_extn,".dat");
428  } else if ((!strcmp(solver->op_file_format,"binary")) || (!strcmp(solver->op_file_format,"bin"))) {
429  solver->WriteOutput = WriteBinary;
430  strcpy(solver->solnfilename_extn,".bin");
431  } else if (!strcmp(solver->op_file_format,"none")) {
432  solver->WriteOutput = NULL;
433  } else {
434  fprintf(stderr,"Error (domain %d): %s is not a supported file format.\n",
435  ns, solver->op_file_format);
436  return(1);
437  }
438  if ((!strcmp(solver->op_overwrite,"no")) && solver->restart_iter) {
439  /* if it's a restart run, fast-forward the filename */
440  int t;
441  for (t=0; t<solver->restart_iter; t++)
442  if ((t+1)%solver->file_op_iter == 0) IncrementFilenameIndex(solver->filename_index,solver->index_length);
443  }
444  } else if (!strcmp(solver->output_mode,"parallel")) {
445  if (!strcmp(solver->op_file_format,"none")) solver->WriteOutput = NULL;
446  else {
447  /* only binary file writing supported in parallel mode */
448  /* use post-processing scripts to convert */
449  solver->WriteOutput = WriteBinary;
450  strcpy(solver->solnfilename_extn,".bin");
451  }
452  } else {
453  fprintf(stderr,"Error (domain %d): %s is not a supported output mode.\n",
454  ns, solver->output_mode);
455  fprintf(stderr,"Should be \"serial\" or \"parallel\". \n");
456  return(1);
457  }
458 
459  /* Solution plotting function */
460  strcpy(solver->plotfilename_extn,".png");
461 #ifdef with_python
462  solver->py_plt_func = NULL;
463  solver->py_plt_func_args = NULL;
464  {
465  char python_plotting_fname[_MAX_STRING_SIZE_] = "plotSolution";
466  PyObject* py_plot_name = PyUnicode_DecodeFSDefault(python_plotting_fname);
467  PyObject* py_plot_module = PyImport_Import(py_plot_name);
468  Py_DECREF(py_plot_name);
469  if (py_plot_module) {
470  solver->py_plt_func = PyObject_GetAttrString(py_plot_module, "plotSolution");
471  if (!solver->py_plt_func) {
472  if (!mpi->rank) {
473  printf("Unable to load plotSolution function from Python module.\n");
474  }
475  } else {
476  if (!mpi->rank) {
477  printf("Loaded Python module for plotting.\n");
478  printf("Loaded plotSolution function from Python module.\n");
479  }
480  }
481  } else {
482  if (!mpi->rank) {
483  printf("Unable to load Python module for plotting.\n");
484  }
485  }
486  }
487 #endif
488 
489  }
490 
491  return(0);
492 }
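
A note on the restart fast-forward (lines 438 through 443 above): when op_overwrite is "no", the output filename index is advanced once for every completed file_op_iter interval before the restart point, so the run does not overwrite files written before the restart. For example, with restart_iter = 5000 and file_op_iter = 1000, the five-digit index is incremented five times and the next solution file is written with index 00005.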

◆ Cleanup()

int Cleanup ( void *  s,
int  nsims 
)

Clean up: deallocate all arrays and objects

Cleans up and frees the memory after the completion of the simulation.

Parameters
    s      Array of simulation objects of type SimulationObject
    nsims  Number of simulation objects

Definition at line 39 of file Cleanup.c.

 42 {
 43  SimulationObject* sim = (SimulationObject*) s;
 44  int ns;
 45  _DECLARE_IERR_;
 46 
47  if (nsims == 0) return 0;
48 
49  if (!sim[0].mpi.rank) {
50  printf("Deallocating arrays.\n");
51  }
52 
53  for (ns = 0; ns < nsims; ns++) {
54 
55  if (sim[ns].is_barebones == 1) {
56  fprintf(stderr, "Error in Cleanup(): object is barebones type.\n");
57  return 1;
58  }
59 
60  HyPar* solver = &(sim[ns].solver);
61  MPIVariables* mpi = &(sim[ns].mpi);
62  DomainBoundary* boundary = (DomainBoundary*) solver->boundary;
63  int i;
64 
65  /* Clean up boundary zones */
66  for (i = 0; i < solver->nBoundaryZones; i++) {
67 #if defined(HAVE_CUDA)
68  BCCleanup(&boundary[i], solver->use_gpu);
69 #else
70  BCCleanup(&boundary[i], 0);
71 #endif
72  }
73  free(solver->boundary);
74 
75  /* Clean up immersed boundaries */
76  if (solver->flag_ib) {
77  IERR IBCleanup(solver->ib);
78  free(solver->ib);
79  }
80 
81  /* Clean up any allocations in physical model */
82  if (!strcmp(solver->model,_LINEAR_ADVECTION_DIFFUSION_REACTION_)) {
83  IERR LinearADRCleanup(solver->physics); CHECKERR(ierr);
84  } else if (!strcmp(solver->model,_FP_DOUBLE_WELL_)) {
85  IERR FPDoubleWellCleanup(solver->physics); CHECKERR(ierr);
86  } else if (!strcmp(solver->model,_FP_POWER_SYSTEM_)) {
87  IERR FPPowerSystemCleanup(solver->physics); CHECKERR(ierr);
 88  } else if (!strcmp(solver->model,_FP_POWER_SYSTEM_1BUS_)) {
 89  IERR FPPowerSystem1BusCleanup(solver->physics); CHECKERR(ierr);
 90  } else if (!strcmp(solver->model,_FP_POWER_SYSTEM_3BUS_)) {
 91  IERR FPPowerSystem3BusCleanup(solver->physics); CHECKERR(ierr);
92  } else if (!strcmp(solver->model,_EULER_1D_)) {
93  IERR Euler1DCleanup(solver->physics); CHECKERR(ierr);
94  } else if (!strcmp(solver->model,_EULER_2D_)) {
95  IERR Euler2DCleanup(solver->physics); CHECKERR(ierr);
 96  } else if (!strcmp(solver->model,_NAVIER_STOKES_2D_)) {
 97  IERR NavierStokes2DCleanup(solver->physics); CHECKERR(ierr);
 98  } else if (!strcmp(solver->model,_NAVIER_STOKES_3D_)) {
 99  IERR NavierStokes3DCleanup(solver->physics); CHECKERR(ierr);
 100 #if defined(HAVE_CUDA)
 101  if (solver->use_gpu) {
 102  gpuNavierStokes3DCleanup(solver->physics);
 103  }
104 #endif
105  } else if (!strcmp(solver->model,_NUMA2D_)) {
106  IERR Numa2DCleanup(solver->physics); CHECKERR(ierr);
107  } else if (!strcmp(solver->model,_NUMA3D_)) {
108  IERR Numa3DCleanup(solver->physics); CHECKERR(ierr);
109  } else if (!strcmp(solver->model,_SHALLOW_WATER_1D_)) {
110  IERR ShallowWater1DCleanup(solver->physics); CHECKERR(ierr);
111  } else if (!strcmp(solver->model,_SHALLOW_WATER_2D_)) {
112  IERR ShallowWater2DCleanup(solver->physics); CHECKERR(ierr);
113  } else if (!strcmp(solver->model,_VLASOV_)) {
114  IERR VlasovCleanup(solver->physics); CHECKERR(ierr);
115  }
116  free(solver->physics);
117 
118  /* Clean up any allocations from time-integration */
119 #ifdef with_petsc
120  if (!solver->use_petscTS) {
121  if (!strcmp(solver->time_scheme,_RK_)) {
122  IERR TimeExplicitRKCleanup(solver->msti); CHECKERR(ierr);
123  free(solver->msti);
124  } else if (!strcmp(solver->time_scheme,_GLM_GEE_)) {
125  IERR TimeGLMGEECleanup(solver->msti); CHECKERR(ierr);
126  free(solver->msti);
127  }
128  }
129 #else
130  if (!strcmp(solver->time_scheme,_RK_)) {
131  IERR TimeExplicitRKCleanup(solver->msti); CHECKERR(ierr);
132  free(solver->msti);
133  } else if (!strcmp(solver->time_scheme,_GLM_GEE_)) {
134  IERR TimeGLMGEECleanup(solver->msti); CHECKERR(ierr);
135  free(solver->msti);
136  }
137 #endif
138 
139  /* Clean up any spatial reconstruction related allocations */
140  if ( (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_WENO_ ))
141  || (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_CRWENO_))
142  || (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_HCWENO_)) ) {
143 #if defined(HAVE_CUDA)
144  IERR WENOCleanup(solver->interp, solver->use_gpu); CHECKERR(ierr);
145 #else
146  IERR WENOCleanup(solver->interp, 0); CHECKERR(ierr);
147 #endif
148  }
149  if (solver->interp) free(solver->interp);
150  if ( (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_COMPACT_UPWIND_ ))
151  || (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_CRWENO_ ))
152  || (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_HCWENO_ )) ) {
153  IERR CompactSchemeCleanup(solver->compact); CHECKERR(ierr);
154  }
155  if (solver->compact) free(solver->compact);
156  if (solver->lusolver) free(solver->lusolver);
157 
158  /* Free the communicators created */
159  IERR MPIFreeCommunicators(solver->ndims,mpi); CHECKERR(ierr);
160 
161  /* These variables are allocated in Initialize.c */
162  free(solver->dim_global);
163  free(solver->dim_global_ex);
164  free(solver->dim_local);
165  free(solver->index);
166  free(solver->u);
167 #ifdef with_petsc
168  if (solver->u0) free(solver->u0);
169  if (solver->uref) free(solver->uref);
170  if (solver->rhsref) free(solver->rhsref);
171  if (solver->rhs) free(solver->rhs);
172 #endif
173 #ifdef with_librom
174  free(solver->u_rom_predicted);
175 #endif
176  free(solver->iblank);
177  free(solver->x);
178  free(solver->dxinv);
179  free(solver->isPeriodic);
180  free(mpi->iproc);
181  free(mpi->ip);
182  free(mpi->is);
183  free(mpi->ie);
184  free(mpi->bcperiodic);
185  free(mpi->sendbuf);
186  free(mpi->recvbuf);
187  free(solver->VolumeIntegral);
188  free(solver->VolumeIntegralInitial);
189  free(solver->TotalBoundaryIntegral);
190  free(solver->ConservationError);
191  free(solver->stride_with_ghosts);
192  free(solver->stride_without_ghosts);
193 
194 #if defined(HAVE_CUDA)
195  if (solver->use_gpu) {
196  gpuFree(solver->hyp);
197  gpuFree(solver->par);
198  gpuFree(solver->source);
199  gpuFree(solver->uC);
200  gpuFree(solver->fluxC);
201  gpuFree(solver->Deriv1);
202  gpuFree(solver->Deriv2);
203  gpuFree(solver->fluxI);
204  gpuFree(solver->uL);
205  gpuFree(solver->uR);
206  gpuFree(solver->fL);
207  gpuFree(solver->fR);
 208  gpuFree(solver->StageBoundaryBuffer);
 209  gpuFree(solver->StageBoundaryIntegral);
 210  gpuFree(solver->StepBoundaryIntegral);
211 
212  gpuFree(solver->gpu_dim_local);
213  gpuFree(solver->gpu_iblank);
214  gpuFree(solver->gpu_x);
215  gpuFree(solver->gpu_dxinv);
216  gpuFree(solver->gpu_u);
217  } else {
218 #endif
219  free(solver->hyp);
220  free(solver->par);
221  free(solver->source);
222  free(solver->uC);
223  free(solver->fluxC);
224  free(solver->Deriv1);
225  free(solver->Deriv2);
226  free(solver->fluxI);
227  free(solver->uL);
228  free(solver->uR);
229  free(solver->fL);
230  free(solver->fR);
231  free(solver->StageBoundaryIntegral);
232  free(solver->StepBoundaryIntegral);
233 #if defined(HAVE_CUDA)
234  }
235 #endif
236 
237  if (solver->filename_index) free(solver->filename_index);
238 
239  }
240 
241  return(0);
242 }
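
The listing above frees everything allocated during initialization, so in a driver program Cleanup() is the last call made on the array of simulation objects. The sketch below is a hypothetical helper (not HyPar's actual main program) illustrating that ordering; the timer values passed to SimWriteErrors() are placeholders.

    #include <simulation_object.h>  /* SimulationObject and its members     */
    #include <simulation.h>         /* Solve(), SimWriteErrors(), Cleanup() */

    /* Hypothetical helper: integrate in time, report errors, then free
     * all arrays and objects. Returns non-zero on failure. */
    int run_and_cleanup(SimulationObject* sim, int nsims, int rank, int nproc)
    {
      double solver_runtime = 0.0, main_runtime = 0.0;  /* timing omitted in this sketch */
      if (Solve(sim, nsims, rank, nproc)) return 1;     /* native time integration       */
      SimWriteErrors(sim, nsims, rank, solver_runtime, main_runtime);
      return Cleanup(sim, nsims);                       /* deallocate arrays and objects */
    }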

◆ SimWriteErrors()

void SimWriteErrors ( void *  s,
int  nsims,
int  rank,
double  solver_runtime,
double  main_runtime 
)

Write errors for each simulation

Writes out the solution errors, conservation errors, function call counts, and wall times for each simulation.

Parameters
    s               Array of simulations of type SimulationObject
    nsims           Number of simulations
    rank            MPI rank of this process
    solver_runtime  Measured runtime of solver
    main_runtime    Measured total runtime

Definition at line 18 of file SimulationWriteErrors.c.

 24 {
 25  SimulationObject* sim = (SimulationObject*) s;
 26  int n;
27 
28  if (!rank) {
29 
30  if (nsims > 1) printf("\n");
31 
32  for (n = 0; n < nsims; n++) {
33 
34  char err_fname[_MAX_STRING_SIZE_],
35  cons_fname[_MAX_STRING_SIZE_],
36  fc_fname[_MAX_STRING_SIZE_];
37  strcpy(err_fname,"errors");
38  strcpy(cons_fname,"conservation");
39  strcpy(fc_fname,"function_counts");
40 #ifdef with_librom
41  char rom_diff_fname[_MAX_STRING_SIZE_];
42  strcpy(rom_diff_fname,"pde_rom_diff");
43 #endif
44 
45 
46  if (nsims > 1) {
47 
48  strcat(err_fname,"_");
49  strcat(cons_fname,"_");
50  strcat(fc_fname,"_");
51 #ifdef with_librom
52  strcat(rom_diff_fname,"_");
53 #endif
54 
55  char index[_MAX_STRING_SIZE_];
56  GetStringFromInteger(n, index, (int)log10(nsims)+1);
57 
58  strcat(err_fname,index);
59  strcat(cons_fname,index);
60  strcat(fc_fname,index);
61 #ifdef with_librom
62  strcat(rom_diff_fname,index);
63 #endif
64  }
65 
66  strcat(err_fname,".dat");
67  strcat(cons_fname,".dat");
68  strcat(fc_fname,".dat");
69 #ifdef with_librom
70  strcat(rom_diff_fname,".dat");
71 #endif
72 
73  FILE *out;
74  /* write out solution errors and wall times to file */
75  out = fopen(err_fname,"w");
76  for (int d=0; d<sim[n].solver.ndims; d++) fprintf(out,"%4d ",sim[n].solver.dim_global[d]);
77  for (int d=0; d<sim[n].solver.ndims; d++) fprintf(out,"%4d ",sim[n].mpi.iproc[d]);
78  fprintf(out,"%1.16E ",sim[n].solver.dt);
79  fprintf(out,"%1.16E %1.16E %1.16E ",sim[n].solver.error[0],sim[n].solver.error[1],sim[n].solver.error[2]);
80  fprintf(out,"%1.16E %1.16E\n",solver_runtime,main_runtime);
81  fclose(out);
82  /* write out conservation errors to file */
83  out = fopen(cons_fname,"w");
84  for (int d=0; d<sim[n].solver.ndims; d++) fprintf(out,"%4d ",sim[n].solver.dim_global[d]);
85  for (int d=0; d<sim[n].solver.ndims; d++) fprintf(out,"%4d ",sim[n].mpi.iproc[d]);
86  fprintf(out,"%1.16E ",sim[n].solver.dt);
87  for (int d=0; d<sim[n].solver.nvars; d++) fprintf(out,"%1.16E ",sim[n].solver.ConservationError[d]);
88  fprintf(out,"\n");
89  fclose(out);
90  /* write out function call counts to file */
91  out = fopen(fc_fname,"w");
92  fprintf(out,"%d\n",sim[n].solver.n_iter);
93  fprintf(out,"%d\n",sim[n].solver.count_hyp);
94  fprintf(out,"%d\n",sim[n].solver.count_par);
95  fprintf(out,"%d\n",sim[n].solver.count_sou);
96 #ifdef with_petsc
97  fprintf(out,"%d\n",sim[n].solver.count_RHSFunction);
98  fprintf(out,"%d\n",sim[n].solver.count_IFunction);
99  fprintf(out,"%d\n",sim[n].solver.count_IJacobian);
100  fprintf(out,"%d\n",sim[n].solver.count_IJacFunction);
101 #endif
102  fclose(out);
103 #ifdef with_librom
104  /* write out solution errors and wall times to file */
105  if (sim[n].solver.rom_diff_norms[0] >= 0) {
106  out = fopen(rom_diff_fname,"w");
107  for (int d=0; d<sim[n].solver.ndims; d++) fprintf(out,"%4d ",sim[n].solver.dim_global[d]);
108  for (int d=0; d<sim[n].solver.ndims; d++) fprintf(out,"%4d ",sim[n].mpi.iproc[d]);
109  fprintf(out,"%1.16E ",sim[n].solver.dt);
110  fprintf(out,"%1.16E %1.16E %1.16E ",sim[n].solver.rom_diff_norms[0],sim[n].solver.rom_diff_norms[1],sim[n].solver.rom_diff_norms[2]);
111  fprintf(out,"%1.16E %1.16E\n",solver_runtime,main_runtime);
112  fclose(out);
113  }
114 #endif
115 
116  /* print solution errors, conservation errors, and wall times to screen */
117  if (sim[n].solver.error[0] >= 0) {
118  printf("Computed errors for domain %d:\n", n);
119  printf(" L1 Error : %1.16E\n",sim[n].solver.error[0]);
120  printf(" L2 Error : %1.16E\n",sim[n].solver.error[1]);
121  printf(" Linfinity Error : %1.16E\n",sim[n].solver.error[2]);
122  }
123  if (!strcmp(sim[n].solver.ConservationCheck,"yes")) {
124  printf("Conservation Errors:\n");
125  for (int d=0; d<sim[n].solver.nvars; d++) printf("\t%1.16E\n",sim[n].solver.ConservationError[d]);
126  }
127 #ifdef with_librom
128  if (sim[n].solver.rom_diff_norms[0] >= 0) {
129  printf("Norms of the diff between ROM and PDE solutions for domain %d:\n", n);
130  printf(" L1 Norm : %1.16E\n",sim[n].solver.rom_diff_norms[0]);
131  printf(" L2 Norm : %1.16E\n",sim[n].solver.rom_diff_norms[1]);
132  printf(" Linfinity Norm : %1.16E\n",sim[n].solver.rom_diff_norms[2]);
133  }
134 #endif
135 
136  }
137 
138  printf("Solver runtime (in seconds): %1.16E\n",solver_runtime);
139  printf("Total runtime (in seconds): %1.16E\n",main_runtime);
140  if (nsims > 1) printf("\n");
141 
142  }
143 
144  return;
145 }
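
As the listing shows, the errors file ("errors.dat", or "errors_<n>.dat" when there are multiple simulation domains) contains a single line with the following columns (placeholders, one column per value):

    <dim_global[0]> ... <dim_global[ndims-1]>  <iproc[0]> ... <iproc[ndims-1]>  <dt>  <L1 error>  <L2 error>  <Linf error>  <solver wall time (s)>  <total wall time (s)>

The conservation file ("conservation.dat") has the same leading columns but lists the nvars conservation errors in place of the solution errors and wall times, and the function-counts file ("function_counts.dat") lists the iteration count followed by the number of hyperbolic, parabolic, and source term evaluations (plus the PETSc function counts when compiled with PETSc).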

◆ SolvePETSc()

int SolvePETSc ( void *  s,
int  nsims,
int  rank,
int  nproc 
)

Integrate in time with PETSc.

Solve the PDE using PETSc TS

This function integrates the semi-discrete ODE (obtained from discretizing the PDE in space) using the time-integration module of PETSc (https://petsc.org/release/docs/manualpages/TS/index.html). The time-integration context is set up using the parameters specified in the input file. However, they can also be specified using PETSc's command line inputs.

See PETSc's documentation and examples for more details on how to use its TS module. All functions and data types whose names start with Vec, Mat, PC, KSP, SNES, and TS are PETSc types and functions; refer to the PETSc documentation (searching for the name usually brings up its manual page on the PETSc website).
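
For example, the following (hypothetical) combination of options selects a PETSc ARKIMEX scheme and turns on the colored finite-difference preconditioner matrix set up by this function; -ts_type and -ts_arkimex_type are standard PETSc TS options, while -with_pc, -pc_matrix_type, -pc_matrix_colored_fd_stencil_width, and the -<term>_implicit / -<term>_explicit flags are read directly by SolvePETSc(), as the listing below shows:

    -ts_type arkimex -ts_arkimex_type 4
    -with_pc -pc_matrix_type colored_fd -pc_matrix_colored_fd_stencil_width 1
    -hyperbolic_implicit -parabolic_implicit -source_implicit

These options can be appended to the run command or placed in a PETSc options file.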

Parameters
    s      Array of simulation objects of type SimulationObject
    nsims  Number of simulation objects
    rank   MPI rank of this process
    nproc  Number of MPI processes

Definition at line 50 of file SolvePETSc.cpp.

 54 {
 55  SimulationObject* sim = (SimulationObject*) s;
 56 
57  DM dm; /* data management object */
58  TS ts; /* time integration object */
59  Vec Y,Z; /* PETSc solution vectors */
60  Mat A, B; /* Jacobian and preconditioning matrices */
61  MatFDColoring fdcoloring; /* coloring for sparse Jacobian computation */
62  TSType time_scheme; /* time integration method */
63  TSProblemType ptype; /* problem type - nonlinear or linear */
64 
65  int flag_mat_a = 0,
66  flag_mat_b = 0,
67  flag_fdcoloring = 0,
68  iAuxSize = 0, i;
69 
70  PetscFunctionBegin;
71 
 72  /* Register custom time-integration methods, if specified */
 73  PetscRegisterTIMethods(rank);
 74  if(!rank) printf("Setting up PETSc time integration... \n");
75 
76  /* create and set a PETSc context */
77  PETScContext context;
78 
79  context.rank = rank;
80  context.nproc = nproc;
81 
82  context.simobj = sim;
83  context.nsims = nsims;
84 
85  /* default: everything explicit */
86  context.flag_hyperbolic = _EXPLICIT_;
87  context.flag_hyperbolic_f = _EXPLICIT_;
88  context.flag_hyperbolic_df = _EXPLICIT_;
89  context.flag_parabolic = _EXPLICIT_;
90  context.flag_source = _EXPLICIT_;
91 
92  context.tic = 0;
93  context.flag_is_linear = 0;
94  context.globalDOF.clear();
95  context.points.clear();
96  context.ti_runtime = 0.0;
97  context.waqt = 0.0;
98  context.dt = sim[0].solver.dt;
99  context.stage_times.clear();
100  context.stage_index = 0;
101 
102 #ifdef with_librom
103  if (!rank) printf("Setting up libROM interface.\n");
104  context.rom_interface = new libROMInterface( sim,
105  nsims,
106  rank,
107  nproc,
108  sim[0].solver.dt );
109  context.rom_mode = ((libROMInterface*)context.rom_interface)->mode();
110  context.op_times_arr.clear();
111 #endif
112 
113 #ifdef with_librom
114  if ( (context.rom_mode == _ROM_MODE_TRAIN_)
115  || (context.rom_mode == _ROM_MODE_INITIAL_GUESS_ )
116  || (context.rom_mode == _ROM_MODE_NONE_ ) ) {
117 
118  if (context.rom_mode == _ROM_MODE_INITIAL_GUESS_) {
119  ((libROMInterface*)context.rom_interface)->loadROM();
120  ((libROMInterface*)context.rom_interface)->projectInitialSolution(sim);
121  }
122 #endif
123  PetscCreatePointList(&context);
124 
125  /* create and initialize PETSc solution vector and other parameters */
126  /* PETSc solution vector does not have ghost points */
127  VecCreate(MPI_COMM_WORLD,&Y);
128  VecSetSizes(Y,context.ndofs,PETSC_DECIDE);
129  VecSetUp(Y);
130 
131  /* copy initial solution to PETSc's vector */
132  for (int ns = 0; ns < nsims; ns++) {
133  TransferVecToPETSc( sim[ns].solver.u,
134  Y,
135  &context,
136  ns,
137  context.offsets[ns] );
138  }
139 
140  /* Create the global DOF mapping for all the grid points */
141  PetscGlobalDOF(&context);
142 
143  /* Define and initialize the time-integration object */
144  TSCreate(MPI_COMM_WORLD,&ts);
145  TSSetMaxSteps(ts,sim[0].solver.n_iter);
146  TSSetMaxTime(ts,sim[0].solver.dt*sim[0].solver.n_iter);
147  TSSetTimeStep(ts,sim[0].solver.dt);
148  TSSetTime(ts,context.waqt);
149  TSSetExactFinalTime(ts,TS_EXACTFINALTIME_MATCHSTEP);
150  TSSetType(ts,TSBEULER);
151 
152  /* set default time step adaptivity to none */
153  TSAdapt adapt;
154  TSAdaptType adapt_type = TSADAPTNONE;
155  TSGetAdapt(ts,&adapt);
156  TSAdaptSetType(adapt,adapt_type);
157 
158  /* set options from input */
159  TSSetFromOptions(ts);
160 
161  /* create DM */
162  DMShellCreate(MPI_COMM_WORLD, &dm);
163  DMShellSetGlobalVector(dm, Y);
164  TSSetDM(ts, dm);
165 
166 #ifdef with_librom
167  TSAdaptGetType(adapt,&adapt_type);
168  if (strcmp(adapt_type, TSADAPTNONE)) {
169  if (!rank) printf("Warning: libROM interface not yet implemented for adaptive timestepping.\n");
170  }
171 #endif
172 
173  /* Define the right and left -hand side functions for each time-integration scheme */
174  TSGetType(ts,&time_scheme);
175  TSGetProblemType(ts,&ptype);
176 
177  if (!strcmp(time_scheme,TSARKIMEX)) {
178 
179  /* implicit - explicit time integration */
180 
181  TSSetRHSFunction(ts,nullptr,PetscRHSFunctionIMEX,&context);
182  TSSetIFunction (ts,nullptr,PetscIFunctionIMEX, &context);
183 
184  SNES snes;
185  KSP ksp;
186  PC pc;
187  SNESType snestype;
188  TSGetSNES(ts,&snes);
189  SNESGetType(snes,&snestype);
190 
191 #ifdef with_librom
192  if (context.rom_mode == _ROM_MODE_INITIAL_GUESS_) {
193  SNESSetComputeInitialGuess(snes, PetscSetInitialGuessROM, &context);
194  }
195 #endif
196 
197  context.flag_use_precon = 0;
198  PetscOptionsGetBool( nullptr,nullptr,
199  "-with_pc",
200  (PetscBool*)(&context.flag_use_precon),
201  nullptr );
202 
203  char precon_mat_type_c_st[_MAX_STRING_SIZE_] = "default";
204  PetscOptionsGetString( nullptr,
205  nullptr,
206  "-pc_matrix_type",
 207  precon_mat_type_c_st,
 208  _MAX_STRING_SIZE_,
 209  nullptr );
210  context.precon_matrix_type = std::string(precon_mat_type_c_st);
211 
212  if (context.flag_use_precon) {
213 
214  if (context.precon_matrix_type == "default") {
215 
216  /* Matrix-free representation of the Jacobian */
217  flag_mat_a = 1;
218  MatCreateShell( MPI_COMM_WORLD,
219  context.ndofs,
220  context.ndofs,
221  PETSC_DETERMINE,
222  PETSC_DETERMINE,
223  &context,
224  &A);
225  if ((!strcmp(snestype,SNESKSPONLY)) || (ptype == TS_LINEAR)) {
226  /* linear problem */
227  context.flag_is_linear = 1;
228  MatShellSetOperation(A,MATOP_MULT,(void (*)(void))PetscJacobianFunctionIMEX_Linear);
229  SNESSetType(snes,SNESKSPONLY);
230  } else {
231  /* nonlinear problem */
232  context.flag_is_linear = 0;
233  context.jfnk_eps = 1e-7;
234  PetscOptionsGetReal(NULL,NULL,"-jfnk_epsilon",&context.jfnk_eps,NULL);
235  MatShellSetOperation(A,MATOP_MULT,(void (*)(void))PetscJacobianFunctionIMEX_JFNK);
236  }
237  MatSetUp(A);
238  /* check if Jacobian of the physical model is defined */
239  for (int ns = 0; ns < nsims; ns++) {
240  if ((!sim[ns].solver.JFunction) && (!sim[ns].solver.KFunction)) {
241  if (!rank) {
242  fprintf(stderr,"Error in SolvePETSc(): solver->JFunction or solver->KFunction ");
243  fprintf(stderr,"(point-wise Jacobians for hyperbolic or parabolic terms) must ");
244  fprintf(stderr,"be defined for preconditioning.\n");
245  }
246  PetscFunctionReturn(1);
247  }
248  }
249  /* Set up preconditioner matrix */
250  flag_mat_b = 1;
251  MatCreateAIJ( MPI_COMM_WORLD,
252  context.ndofs,
253  context.ndofs,
254  PETSC_DETERMINE,
255  PETSC_DETERMINE,
256  (sim[0].solver.ndims*2+1)*sim[0].solver.nvars, NULL,
257  2*sim[0].solver.ndims*sim[0].solver.nvars, NULL,
258  &B );
259  MatSetBlockSize(B,sim[0].solver.nvars);
260  /* Set the IJacobian function for TS */
261  TSSetIJacobian(ts,A,B,PetscIJacobianIMEX,&context);
262 
263  } else if (context.precon_matrix_type == "fd") {
264 
265  flag_mat_a = 1;
266  MatCreateSNESMF(snes,&A);
267  flag_mat_b = 1;
268  MatCreateAIJ( MPI_COMM_WORLD,
269  context.ndofs,
270  context.ndofs,
271  PETSC_DETERMINE,
272  PETSC_DETERMINE,
273  (sim[0].solver.ndims*2+1)*sim[0].solver.nvars, NULL,
274  2*sim[0].solver.ndims*sim[0].solver.nvars, NULL,
275  &B);
276  MatSetOption(B, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
277  /* Set the Jacobian function for SNES */
278  SNESSetJacobian(snes, A, B, SNESComputeJacobianDefault, NULL);
279 
280  } else if (context.precon_matrix_type == "colored_fd") {
281 
282  int stencil_width = 1;
283  PetscOptionsGetInt( NULL,
284  NULL,
285  "-pc_matrix_colored_fd_stencil_width",
286  &stencil_width,
287  NULL );
288 
289  flag_mat_a = 1;
290  MatCreateSNESMF(snes,&A);
291  flag_mat_b = 1;
292  MatCreateAIJ( MPI_COMM_WORLD,
293  context.ndofs,
294  context.ndofs,
295  PETSC_DETERMINE,
296  PETSC_DETERMINE,
297  (sim[0].solver.ndims*2+1)*sim[0].solver.nvars, NULL,
298  2*sim[0].solver.ndims*sim[0].solver.nvars, NULL,
299  &B);
300  MatSetOption(B, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
301  if (!rank) {
302  printf("PETSc: Setting Jacobian non-zero pattern (stencil width %d).\n",
303  stencil_width );
304  }
305  PetscJacobianMatNonzeroEntriesImpl(B, stencil_width, &context);
306 
307  /* Set the Jacobian function for SNES */
308  SNESSetJacobian(snes, A, B, SNESComputeJacobianDefaultColor, NULL);
309 
310  } else {
311 
312  if (!rank) {
313  fprintf( stderr,"Invalid input for \"-pc_matrix_type\": %s.\n",
314  context.precon_matrix_type.c_str());
315  }
316  PetscFunctionReturn(0);
317 
318  }
319 
320  /* set PC side to right */
321  SNESGetKSP(snes,&ksp);
322  KSPSetPCSide(ksp, PC_RIGHT);
323 
324  } else {
325 
326  /* Matrix-free representation of the Jacobian */
327  flag_mat_a = 1;
328  MatCreateShell( MPI_COMM_WORLD,
329  context.ndofs,
330  context.ndofs,
331  PETSC_DETERMINE,
332  PETSC_DETERMINE,
333  &context,
334  &A);
335  if ((!strcmp(snestype,SNESKSPONLY)) || (ptype == TS_LINEAR)) {
336  /* linear problem */
337  context.flag_is_linear = 1;
338  MatShellSetOperation(A,MATOP_MULT,(void (*)(void))PetscJacobianFunctionIMEX_Linear);
339  SNESSetType(snes,SNESKSPONLY);
340  } else {
341  /* nonlinear problem */
342  context.flag_is_linear = 0;
343  context.jfnk_eps = 1e-7;
344  PetscOptionsGetReal(NULL,NULL,"-jfnk_epsilon",&context.jfnk_eps,NULL);
345  MatShellSetOperation(A,MATOP_MULT,(void (*)(void))PetscJacobianFunctionIMEX_JFNK);
346  }
347  MatSetUp(A);
348  /* Set the RHSJacobian function for TS */
349  TSSetIJacobian(ts,A,A,PetscIJacobianIMEX,&context);
350  /* Set PC (preconditioner) to none */
351  SNESGetKSP(snes,&ksp);
352  KSPGetPC(ksp,&pc);
353  PCSetType(pc,PCNONE);
354  }
355 
356  /* read the implicit/explicit flags for each of the terms for IMEX schemes */
357  /* default -> hyperbolic - explicit, parabolic and source - implicit */
358  PetscBool flag = PETSC_FALSE;
359 
360  context.flag_hyperbolic = _EXPLICIT_;
361  context.flag_hyperbolic_f = _EXPLICIT_;
362  context.flag_hyperbolic_df = _IMPLICIT_;
363  context.flag_parabolic = _IMPLICIT_;
364  context.flag_source = _IMPLICIT_;
365 
366  if (!strcmp(sim[0].solver.SplitHyperbolicFlux,"yes")) {
367 
368  flag = PETSC_FALSE;
369  PetscOptionsGetBool(nullptr,nullptr,"-hyperbolic_f_explicit",&flag,nullptr);
370  if (flag == PETSC_TRUE) context.flag_hyperbolic_f = _EXPLICIT_;
371  flag = PETSC_FALSE;
372  PetscOptionsGetBool(nullptr,nullptr,"-hyperbolic_f_implicit",&flag,nullptr);
373  if (flag == PETSC_TRUE) context.flag_hyperbolic_f = _IMPLICIT_;
374 
375  flag = PETSC_FALSE;
376  PetscOptionsGetBool(nullptr,nullptr,"-hyperbolic_df_explicit",&flag,nullptr);
377  if (flag == PETSC_TRUE) context.flag_hyperbolic_df = _EXPLICIT_;
378  flag = PETSC_FALSE;
379  PetscOptionsGetBool(nullptr,nullptr,"-hyperbolic_df_implicit",&flag,nullptr);
380  if (flag == PETSC_TRUE) context.flag_hyperbolic_df = _IMPLICIT_;
381 
382  } else {
383 
384  flag = PETSC_FALSE;
385  PetscOptionsGetBool(nullptr,nullptr,"-hyperbolic_explicit",&flag,nullptr);
386  if (flag == PETSC_TRUE) context.flag_hyperbolic = _EXPLICIT_;
387  flag = PETSC_FALSE;
388  PetscOptionsGetBool(nullptr,nullptr,"-hyperbolic_implicit",&flag,nullptr);
389  if (flag == PETSC_TRUE) context.flag_hyperbolic = _IMPLICIT_;
390 
391  }
392 
393  flag = PETSC_FALSE;
394  PetscOptionsGetBool(nullptr,nullptr,"-parabolic_explicit",&flag,nullptr);
395  if (flag == PETSC_TRUE) context.flag_parabolic = _EXPLICIT_;
396  flag = PETSC_FALSE;
397  PetscOptionsGetBool(nullptr,nullptr,"-parabolic_implicit",&flag,nullptr);
398  if (flag == PETSC_TRUE) context.flag_parabolic = _IMPLICIT_;
399 
400  flag = PETSC_FALSE;
401  PetscOptionsGetBool(nullptr,nullptr,"-source_explicit",&flag,nullptr);
402  if (flag == PETSC_TRUE) context.flag_source = _EXPLICIT_;
403  flag = PETSC_FALSE;
404  PetscOptionsGetBool(nullptr,nullptr,"-source_implicit",&flag,nullptr);
405  if (flag == PETSC_TRUE) context.flag_source = _IMPLICIT_;
406 
407  flag = PETSC_FALSE;
408  PetscOptionsGetBool(nullptr,nullptr,"-ts_arkimex_fully_implicit",&flag,nullptr);
409  if (flag == PETSC_TRUE) {
410  context.flag_hyperbolic_f = _IMPLICIT_;
411  context.flag_hyperbolic_df = _IMPLICIT_;
412  context.flag_hyperbolic = _IMPLICIT_;
413  context.flag_parabolic = _IMPLICIT_;
414  context.flag_source = _IMPLICIT_;
415  }
416 
417  /* print out a summary of the treatment of each term */
418  if (!rank) {
419  printf("Implicit-Explicit time-integration:-\n");
420  if (!strcmp(sim[0].solver.SplitHyperbolicFlux,"yes")) {
421  if (context.flag_hyperbolic_f == _EXPLICIT_) printf("Hyperbolic (f-df) term: Explicit\n");
422  else printf("Hyperbolic (f-df) term: Implicit\n");
423  if (context.flag_hyperbolic_df == _EXPLICIT_) printf("Hyperbolic (df) term: Explicit\n");
424  else printf("Hyperbolic (df) term: Implicit\n");
425  } else {
426  if (context.flag_hyperbolic == _EXPLICIT_) printf("Hyperbolic term: Explicit\n");
427  else printf("Hyperbolic term: Implicit\n");
428  }
429  if (context.flag_parabolic == _EXPLICIT_) printf("Parabolic term: Explicit\n");
430  else printf("Parabolic term: Implicit\n");
431  if (context.flag_source == _EXPLICIT_) printf("Source term: Explicit\n");
432  else printf("Source term: Implicit\n");
433  }
434 
435  } else if ( (!strcmp(time_scheme,TSEULER))
436  || (!strcmp(time_scheme,TSRK ))
437  || (!strcmp(time_scheme,TSSSP )) ) {
438 
439  /* Explicit time integration */
440  TSSetRHSFunction(ts,nullptr,PetscRHSFunctionExpl,&context);
441 
442  } else if ( (!strcmp(time_scheme,TSCN))
443  || (!strcmp(time_scheme,TSBEULER )) ) {
444 
445 
446  /* Implicit time integration */
447 
448  TSSetIFunction(ts,nullptr,PetscIFunctionImpl,&context);
449 
450  SNES snes;
451  KSP ksp;
452  PC pc;
453  SNESType snestype;
454  TSGetSNES(ts,&snes);
455  SNESGetType(snes,&snestype);
456 
457 #ifdef with_librom
458  if (context.rom_mode == _ROM_MODE_INITIAL_GUESS_) {
459  SNESSetComputeInitialGuess(snes, PetscSetInitialGuessROM, &context);
460  }
461 #endif
462 
463  context.flag_use_precon = 0;
464  PetscOptionsGetBool( nullptr,
465  nullptr,
466  "-with_pc",
467  (PetscBool*)(&context.flag_use_precon),
468  nullptr );
469 
470  char precon_mat_type_c_st[_MAX_STRING_SIZE_] = "default";
471  PetscOptionsGetString( nullptr,
472  nullptr,
473  "-pc_matrix_type",
 474  precon_mat_type_c_st,
 475  _MAX_STRING_SIZE_,
 476  nullptr );
477  context.precon_matrix_type = std::string(precon_mat_type_c_st);
478 
479  if (context.flag_use_precon) {
480 
481  if (context.precon_matrix_type == "default") {
482 
483  /* Matrix-free representation of the Jacobian */
484  flag_mat_a = 1;
485  MatCreateShell( MPI_COMM_WORLD,
486  context.ndofs,
487  context.ndofs,
488  PETSC_DETERMINE,
489  PETSC_DETERMINE,
490  &context,
491  &A);
492  if ((!strcmp(snestype,SNESKSPONLY)) || (ptype == TS_LINEAR)) {
493  /* linear problem */
494  context.flag_is_linear = 1;
495  MatShellSetOperation(A,MATOP_MULT,(void (*)(void))PetscJacobianFunction_Linear);
496  SNESSetType(snes,SNESKSPONLY);
497  } else {
498  /* nonlinear problem */
499  context.flag_is_linear = 0;
500  context.jfnk_eps = 1e-7;
501  PetscOptionsGetReal(NULL,NULL,"-jfnk_epsilon",&context.jfnk_eps,NULL);
502  MatShellSetOperation(A,MATOP_MULT,(void (*)(void))PetscJacobianFunction_JFNK);
503  }
504  MatSetUp(A);
505  /* check if Jacobian of the physical model is defined */
506  for (int ns = 0; ns < nsims; ns++) {
507  if ((!sim[ns].solver.JFunction) && (!sim[ns].solver.KFunction)) {
508  if (!rank) {
509  fprintf(stderr,"Error in SolvePETSc(): solver->JFunction or solver->KFunction ");
510  fprintf(stderr,"(point-wise Jacobians for hyperbolic or parabolic terms) must ");
511  fprintf(stderr,"be defined for preconditioning.\n");
512  }
513  PetscFunctionReturn(1);
514  }
515  }
516  /* Set up preconditioner matrix */
517  flag_mat_b = 1;
518  MatCreateAIJ( MPI_COMM_WORLD,
519  context.ndofs,
520  context.ndofs,
521  PETSC_DETERMINE,
522  PETSC_DETERMINE,
523  (sim[0].solver.ndims*2+1)*sim[0].solver.nvars, NULL,
524  2*sim[0].solver.ndims*sim[0].solver.nvars, NULL,
525  &B );
526  MatSetBlockSize(B,sim[0].solver.nvars);
527  /* Set the IJacobian function for TS */
528  TSSetIJacobian(ts,A,B,PetscIJacobian,&context);
529 
530  } else if (context.precon_matrix_type == "fd") {
531 
532  flag_mat_a = 1;
533  MatCreateSNESMF(snes,&A);
534  flag_mat_b = 1;
535  MatCreateAIJ( MPI_COMM_WORLD,
536  context.ndofs,
537  context.ndofs,
538  PETSC_DETERMINE,
539  PETSC_DETERMINE,
540  (sim[0].solver.ndims*2+1)*sim[0].solver.nvars, NULL,
541  2*sim[0].solver.ndims*sim[0].solver.nvars, NULL,
542  &B);
543  MatSetOption(B, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
544  /* Set the Jacobian function for SNES */
545  SNESSetJacobian(snes, A, B, SNESComputeJacobianDefault, NULL);
546 
547  } else if (context.precon_matrix_type == "colored_fd") {
548 
549  int stencil_width = 1;
550  PetscOptionsGetInt( NULL,
551  NULL,
552  "-pc_matrix_colored_fd_stencil_width",
553  &stencil_width,
554  NULL );
555 
556  flag_mat_a = 1;
557  MatCreateSNESMF(snes,&A);
558  flag_mat_b = 1;
559  MatCreateAIJ( MPI_COMM_WORLD,
560  context.ndofs,
561  context.ndofs,
562  PETSC_DETERMINE,
563  PETSC_DETERMINE,
564  (sim[0].solver.ndims*2+1)*sim[0].solver.nvars, NULL,
565  2*sim[0].solver.ndims*sim[0].solver.nvars, NULL,
566  &B);
567  MatSetOption(B, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
568  if (!rank) {
569  printf("PETSc: Setting Jacobian non-zero pattern (stencil width %d).\n",
570  stencil_width );
571  }
572  PetscJacobianMatNonzeroEntriesImpl(B, stencil_width, &context);
573 
574  /* Set the Jacobian function for SNES */
575  SNESSetJacobian(snes, A, B, SNESComputeJacobianDefaultColor, NULL);
576 
577  } else {
578 
579  if (!rank) {
580  fprintf( stderr,"Invalid input for \"-pc_matrix_type\": %s.\n",
581  context.precon_matrix_type.c_str());
582  }
583  PetscFunctionReturn(0);
584 
585  }
586 
587  /* set PC side to right */
588  SNESGetKSP(snes,&ksp);
589  KSPSetPCSide(ksp, PC_RIGHT);
590 
591  } else {
592 
593  /* Matrix-free representation of the Jacobian */
594  flag_mat_a = 1;
595  MatCreateShell( MPI_COMM_WORLD,
596  context.ndofs,
597  context.ndofs,
598  PETSC_DETERMINE,
599  PETSC_DETERMINE,
600  &context,
601  &A);
602  if ((!strcmp(snestype,SNESKSPONLY)) || (ptype == TS_LINEAR)) {
603  /* linear problem */
604  context.flag_is_linear = 1;
605  MatShellSetOperation(A,MATOP_MULT,(void (*)(void))PetscJacobianFunction_Linear);
606  SNESSetType(snes,SNESKSPONLY);
607  } else {
608  /* nonlinear problem */
609  context.flag_is_linear = 0;
610  context.jfnk_eps = 1e-7;
611  PetscOptionsGetReal(NULL,NULL,"-jfnk_epsilon",&context.jfnk_eps,NULL);
612  MatShellSetOperation(A,MATOP_MULT,(void (*)(void))PetscJacobianFunction_JFNK);
613  }
614  MatSetUp(A);
615  /* Set the RHSJacobian function for TS */
616  TSSetIJacobian(ts,A,A,PetscIJacobian,&context);
617  /* Set PC (preconditioner) to none */
618  SNESGetKSP(snes,&ksp);
619  KSPGetPC(ksp,&pc);
620  PCSetType(pc,PCNONE);
621  }
622 
623  } else {
624 
625  if (!rank) {
626  fprintf(stderr, "Time integration type %s is not yet supported.\n", time_scheme);
627  }
628  PetscFunctionReturn(0);
629 
630  }
631 
632  /* Set pre/post-stage and post-timestep function */
633  TSSetPreStep (ts,PetscPreTimeStep );
634  TSSetPreStage(ts,PetscPreStage );
635  TSSetPostStage(ts,PetscPostStage );
636  TSSetPostStep(ts,PetscPostTimeStep);
637  /* Set solution vector for TS */
638  TSSetSolution(ts,Y);
639  /* Set it all up */
640  TSSetUp(ts);
641  /* Set application context */
642  TSSetApplicationContext(ts,&context);
643 
644  if (!rank) {
645  if (context.flag_is_linear) printf("SolvePETSc(): Problem type is linear.\n");
646  else printf("SolvePETSc(): Problem type is nonlinear.\n");
647  }
648 
649  if (!rank) printf("** Starting PETSc time integration **\n");
650  context.ti_runtime = 0.0;
651  TSSolve(ts,Y);
652  if (!rank) {
653  printf("** Completed PETSc time integration (Final time: %f), total wctime: %f (seconds) **\n",
654  context.waqt, context.ti_runtime );
655  }
656 
657  /* Get the number of time steps */
658  for (int ns = 0; ns < nsims; ns++) {
659  TSGetStepNumber(ts,&(sim[ns].solver.n_iter));
660  }
661 
662  /* get and write to file any auxiliary solutions */
663  char aux_fname_root[4] = "ts0";
664  TSGetSolutionComponents(ts,&iAuxSize,NULL);
665  if (iAuxSize) {
666  if (iAuxSize > 10) iAuxSize = 10;
667  if (!rank) printf("Number of auxiliary solutions from time integration: %d\n",iAuxSize);
668  VecDuplicate(Y,&Z);
669  for (i=0; i<iAuxSize; i++) {
670  TSGetSolutionComponents(ts,&i,&Z);
671  for (int ns = 0; ns < nsims; ns++) {
672  TransferVecFromPETSc(sim[ns].solver.u,Z,&context,ns,context.offsets[ns]);
673  WriteArray( sim[ns].solver.ndims,
674  sim[ns].solver.nvars,
675  sim[ns].solver.dim_global,
676  sim[ns].solver.dim_local,
677  sim[ns].solver.ghosts,
678  sim[ns].solver.x,
679  sim[ns].solver.u,
680  &(sim[ns].solver),
681  &(sim[ns].mpi),
682  aux_fname_root );
683  }
684  aux_fname_root[2]++;
685  }
686  VecDestroy(&Z);
687  }
688 
689  /* if available, get error estimates */
690  PetscTimeError(ts);
691 
692  /* copy final solution from PETSc's vector */
693  for (int ns = 0; ns < nsims; ns++) {
694  TransferVecFromPETSc(sim[ns].solver.u,Y,&context,ns,context.offsets[ns]);
695  }
696 
697  /* clean up */
698  VecDestroy(&Y);
699  if (flag_mat_a) { MatDestroy(&A); }
700  if (flag_mat_b) { MatDestroy(&B); }
701  if (flag_fdcoloring) { MatFDColoringDestroy(&fdcoloring); }
702  TSDestroy(&ts);
703  DMDestroy(&dm);
704 
705  /* write a final solution file, if last iteration did not write one */
706  if (context.tic) {
707  for (int ns = 0; ns < nsims; ns++) {
708  HyPar* solver = &(sim[ns].solver);
709  MPIVariables* mpi = &(sim[ns].mpi);
710  if (solver->PhysicsOutput) {
711  solver->PhysicsOutput(solver,mpi, context.waqt);
712  }
713  CalculateError(solver,mpi);
714  }
715  OutputSolution(sim, nsims, context.waqt);
716  }
717  /* calculate error if exact solution has been provided */
718  for (int ns = 0; ns < nsims; ns++) {
719  CalculateError(&(sim[ns].solver), &(sim[ns].mpi));
720  }
721 
722  PetscCleanup(&context);
723 
724 #ifdef with_librom
725  context.op_times_arr.push_back(context.waqt);
726 
727  for (int ns = 0; ns < nsims; ns++) {
728  ResetFilenameIndex( sim[ns].solver.filename_index,
729  sim[ns].solver.index_length );
730  }
731 
732  if (((libROMInterface*)context.rom_interface)->mode() == _ROM_MODE_TRAIN_) {
733 
734  ((libROMInterface*)context.rom_interface)->train();
735  if (!rank) printf("libROM: total training wallclock time: %f (seconds).\n",
736  ((libROMInterface*)context.rom_interface)->trainWallclockTime() );
737 
738  double total_rom_predict_time = 0;
739  for (int iter = 0; iter < context.op_times_arr.size(); iter++) {
740 
741  double waqt = context.op_times_arr[iter];
742 
743  ((libROMInterface*)context.rom_interface)->predict(sim, waqt);
744  if (!rank) printf( "libROM: Predicted solution at time %1.4e using ROM, wallclock time: %f.\n",
745  waqt, ((libROMInterface*)context.rom_interface)->predictWallclockTime() );
746  total_rom_predict_time += ((libROMInterface*)context.rom_interface)->predictWallclockTime();
747 
748  /* calculate diff between ROM and PDE solutions */
749  if (iter == (context.op_times_arr.size()-1)) {
750  if (!rank) printf("libROM: Calculating diff between PDE and ROM solutions.\n");
751  for (int ns = 0; ns < nsims; ns++) {
752  CalculateROMDiff( &(sim[ns].solver),
753  &(sim[ns].mpi) );
754  }
755  }
756  /* write the ROM solution to file */
757  OutputROMSolution(sim, nsims, waqt);
758 
759  }
760 
761  if (!rank) {
762  printf( "libROM: total prediction/query wallclock time: %f (seconds).\n",
763  total_rom_predict_time );
764  }
765 
766  ((libROMInterface*)context.rom_interface)->saveROM();
767 
768  } else {
769 
770  for (int ns = 0; ns < nsims; ns++) {
771  sim[ns].solver.rom_diff_norms[0]
772  = sim[ns].solver.rom_diff_norms[1]
773  = sim[ns].solver.rom_diff_norms[2]
774  = -1;
775  }
776 
777  }
778 
779  } else if (context.rom_mode == _ROM_MODE_PREDICT_) {
780 
781  for (int ns = 0; ns < nsims; ns++) {
782  sim[ns].solver.rom_diff_norms[0]
783  = sim[ns].solver.rom_diff_norms[1]
784  = sim[ns].solver.rom_diff_norms[2]
785  = -1;
786  strcpy(sim[ns].solver.ConservationCheck,"no");
787  }
788 
789  ((libROMInterface*)context.rom_interface)->loadROM();
790  ((libROMInterface*)context.rom_interface)->projectInitialSolution(sim);
791 
792  {
793  int start_iter = sim[0].solver.restart_iter;
794  int n_iter = sim[0].solver.n_iter;
795  double dt = sim[0].solver.dt;
796 
797  double cur_time = start_iter * dt;
798  context.op_times_arr.push_back(cur_time);
799 
800  for (int iter = start_iter; iter < n_iter; iter++) {
801  cur_time += dt;
802  if ( ( (iter+1)%sim[0].solver.file_op_iter == 0)
803  && ( (iter+1) < n_iter) ) {
804  context.op_times_arr.push_back(cur_time);
805  }
806  }
807 
808  double t_final = n_iter*dt;
809  context.op_times_arr.push_back(t_final);
810  }
811 
812  double total_rom_predict_time = 0;
813  for (int iter = 0; iter < context.op_times_arr.size(); iter++) {
814 
815  double waqt = context.op_times_arr[iter];
816 
817  ((libROMInterface*)context.rom_interface)->predict(sim, waqt);
818  if (!rank) printf( "libROM: Predicted solution at time %1.4e using ROM, wallclock time: %f.\n",
819  waqt, ((libROMInterface*)context.rom_interface)->predictWallclockTime() );
820  total_rom_predict_time += ((libROMInterface*)context.rom_interface)->predictWallclockTime();
821 
822  /* write the solution to file */
823  for (int ns = 0; ns < nsims; ns++) {
824  if (sim[ns].solver.PhysicsOutput) {
825  sim[ns].solver.PhysicsOutput( &(sim[ns].solver),
826  &(sim[ns].mpi),
827  waqt );
828  }
829  }
830  OutputSolution(sim, nsims, waqt);
831 
832  }
833 
834  /* calculate error if exact solution has been provided */
835  for (int ns = 0; ns < nsims; ns++) {
836  CalculateError(&(sim[ns].solver),
837  &(sim[ns].mpi) );
838  }
839 
840  if (!rank) {
841  printf( "libROM: total prediction/query wallclock time: %f (seconds).\n",
842  total_rom_predict_time );
843  }
844 
845  }
846 
847  delete ((libROMInterface*)context.rom_interface);
848 #endif
849 
850  PetscFunctionReturn(0);
851 }

◆ Solve()

int Solve ( void *  s,
int  nsims,
int  rank,
int  nproc 
)

Solve the PDE - time-integration

This function integrates the semi-discrete ODE (obtained from discretizing the PDE in space) using natively implemented time integration methods. It initializes the time integration object, steps the simulation through the required number of iterations, and calculates the errors. After the final iteration, it prints timing information to the screen and writes the solution to file.
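
Condensed, the time loop in the listing below has the following structure (a sketch only; the descriptive comments are paraphrases, not taken from the source):

    TimeIntegration TS;
    TimeInitialize(sim, nsims, rank, nproc, &TS);    /* set up the time integrator        */
    for (TS.iter = TS.restart_iter; TS.iter < TS.n_iter; TS.iter++) {
      TimePreStep  (&TS);                            /* pre-step operations               */
      TimeStep     (&TS);                            /* advance the solution by one step  */
      TimePostStep (&TS);                            /* post-step operations              */
      TimePrintStep(&TS);                            /* print iteration info to screen    */
      if (   ((TS.iter+1) % sim[0].solver.file_op_iter == 0)
          && ((TS.iter+1) < TS.n_iter) ) {
        OutputSolution(sim, nsims, TS.waqt);         /* write intermediate solution files */
      }
    }
    TimeCleanup(&TS);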

Parameters
    s      Array of simulation objects of type SimulationObject
    nsims  Number of simulation objects
    rank   MPI rank of this process
    nproc  Number of MPI processes

Definition at line 37 of file Solve.cpp.

 42 {
 43  SimulationObject* sim = (SimulationObject*) s;
 44 
45  /* make sure none of the simulation objects sent in the array
46  * are "barebones" type */
47  for (int ns = 0; ns < nsims; ns++) {
48  if (sim[ns].is_barebones == 1) {
49  fprintf(stderr, "Error in Solve(): simulation object %d on rank %d is barebones!\n",
50  ns, rank );
51  return 1;
52  }
53  }
54 
55  /* write out iblank to file for visualization */
56  for (int ns = 0; ns < nsims; ns++) {
57  if (sim[ns].solver.flag_ib) {
58 
59  char fname_root[_MAX_STRING_SIZE_] = "iblank";
60  if (nsims > 1) {
61  char index[_MAX_STRING_SIZE_];
62  GetStringFromInteger(ns, index, (int)log10((nsims)+1));
63  strcat(fname_root, "_");
64  strcat(fname_root, index);
65  }
66 
67  WriteArray( sim[ns].solver.ndims,
68  1,
69  sim[ns].solver.dim_global,
70  sim[ns].solver.dim_local,
71  sim[ns].solver.ghosts,
72  sim[ns].solver.x,
73  sim[ns].solver.iblank,
74  &(sim[ns].solver),
75  &(sim[ns].mpi),
76  fname_root );
77  }
78  }
79 
80 #ifdef with_librom
81  if (!rank) printf("Setting up libROM interface.\n");
82  libROMInterface rom_interface( sim, nsims, rank, nproc, sim[0].solver.dt );
83  const std::string& rom_mode( rom_interface.mode() );
84  std::vector<double> op_times_arr(0);
85 #endif
86 
87 #ifdef with_librom
88  if ((rom_mode == _ROM_MODE_TRAIN_) || (rom_mode == _ROM_MODE_NONE_)) {
89 #endif
90  /* Define and initialize the time-integration object */
91  TimeIntegration TS;
92  if (!rank) printf("Setting up time integration.\n");
93  TimeInitialize(sim, nsims, rank, nproc, &TS);
94  double ti_runtime = 0.0;
95 
96  if (!rank) printf("Solving in time (from %d to %d iterations)\n",TS.restart_iter,TS.n_iter);
97  for (TS.iter = TS.restart_iter; TS.iter < TS.n_iter; TS.iter++) {
98 
99  /* Write initial solution to file if this is the first iteration */
100  if (!TS.iter) {
101  for (int ns = 0; ns < nsims; ns++) {
102  if (sim[ns].solver.PhysicsOutput) {
103  sim[ns].solver.PhysicsOutput( &(sim[ns].solver),
104  &(sim[ns].mpi),
105  TS.waqt );
106  }
107  }
108  OutputSolution(sim, nsims, TS.waqt);
109 #ifdef with_librom
110  op_times_arr.push_back(TS.waqt);
111 #endif
112  }
113 
114 #ifdef with_librom
115  if ((rom_mode == _ROM_MODE_TRAIN_) && (TS.iter%rom_interface.samplingFrequency() == 0)) {
116  rom_interface.takeSample( sim, TS.waqt );
117  }
118 #endif
119 
120  /* Call pre-step function */
121  TimePreStep (&TS);
122 #ifdef compute_rhs_operators
123  /* compute and write (to file) matrix operators representing the right-hand side */
124 // if (((TS.iter+1)%solver->file_op_iter == 0) || (!TS.iter))
125 // { ComputeRHSOperators(solver,mpi,TS.waqt);
126 #endif
127 
128  /* Step in time */
129  TimeStep (&TS);
130 
131  /* Call post-step function */
132  TimePostStep (&TS);
133 
134  ti_runtime += TS.iter_wctime;
135 
136  /* Print information to screen */
137  TimePrintStep(&TS);
138 
139  /* Write intermediate solution to file */
140  if ( ((TS.iter+1)%sim[0].solver.file_op_iter == 0)
141  && ((TS.iter+1) < TS.n_iter) ) {
142  for (int ns = 0; ns < nsims; ns++) {
143  if (sim[ns].solver.PhysicsOutput) {
144  sim[ns].solver.PhysicsOutput( &(sim[ns].solver),
145  &(sim[ns].mpi),
146  TS.waqt );
147  }
148  }
149  OutputSolution(sim, nsims, TS.waqt);
150 #ifdef with_librom
151  op_times_arr.push_back(TS.waqt);
152 #endif
153  }
154 
155  }
156 
157  double t_final = TS.waqt;
158  TimeCleanup(&TS);
159 
160  if (!rank) {
161  printf( "Completed time integration (Final time: %f), total wctime: %f (seconds).\n",
162  t_final, ti_runtime );
163  if (nsims > 1) printf("\n");
164  }
165 
166  /* calculate error if exact solution has been provided */
167  for (int ns = 0; ns < nsims; ns++) {
168  CalculateError(&(sim[ns].solver),
169  &(sim[ns].mpi) );
170  }
171 
172  /* write a final solution file */
173  for (int ns = 0; ns < nsims; ns++) {
174  if (sim[ns].solver.PhysicsOutput) {
175  sim[ns].solver.PhysicsOutput( &(sim[ns].solver),
176  &(sim[ns].mpi),
177  t_final );
178  }
179  }
180  OutputSolution(sim, nsims, t_final);
181 
182 #ifdef with_librom
183  op_times_arr.push_back(TS.waqt);
184 
185  for (int ns = 0; ns < nsims; ns++) {
186  ResetFilenameIndex( sim[ns].solver.filename_index,
187  sim[ns].solver.index_length );
188  }
189 
190  if (rom_interface.mode() == _ROM_MODE_TRAIN_) {
191 
192  rom_interface.train();
193  if (!rank) printf("libROM: total training wallclock time: %f (seconds).\n",
194  rom_interface.trainWallclockTime() );
195 
196  double total_rom_predict_time = 0;
197  for (int iter = 0; iter < op_times_arr.size(); iter++) {
198 
199  double waqt = op_times_arr[iter];
200 
201  rom_interface.predict(sim, waqt);
202  if (!rank) printf( "libROM: Predicted solution at time %1.4e using ROM, wallclock time: %f.\n",
203  waqt, rom_interface.predictWallclockTime() );
204  total_rom_predict_time += rom_interface.predictWallclockTime();
205 
206  /* calculate diff between ROM and PDE solutions */
207  if (iter == (op_times_arr.size()-1)) {
208  if (!rank) printf("libROM: Calculating diff between PDE and ROM solutions.\n");
209  for (int ns = 0; ns < nsims; ns++) {
210  CalculateROMDiff( &(sim[ns].solver),
211  &(sim[ns].mpi) );
212  }
213  }
214  /* write the ROM solution to file */
215  OutputROMSolution(sim, nsims,waqt);
216 
217  }
218 
219  if (!rank) {
220  printf( "libROM: total prediction/query wallclock time: %f (seconds).\n",
221  total_rom_predict_time );
222  }
223 
224  rom_interface.saveROM();
225 
226  } else {
227 
228  for (int ns = 0; ns < nsims; ns++) {
229  sim[ns].solver.rom_diff_norms[0]
230  = sim[ns].solver.rom_diff_norms[1]
231  = sim[ns].solver.rom_diff_norms[2]
232  = -1;
233  }
234 
235  }
236 
237  } else if (rom_mode == _ROM_MODE_PREDICT_) {
238 
239  for (int ns = 0; ns < nsims; ns++) {
240  sim[ns].solver.rom_diff_norms[0]
241  = sim[ns].solver.rom_diff_norms[1]
242  = sim[ns].solver.rom_diff_norms[2]
243  = -1;
244  strcpy(sim[ns].solver.ConservationCheck,"no");
245  }
246 
247  rom_interface.loadROM();
248  rom_interface.projectInitialSolution(sim);
249 
250  {
251  int start_iter = sim[0].solver.restart_iter;
252  int n_iter = sim[0].solver.n_iter;
253  double dt = sim[0].solver.dt;
254 
255  double cur_time = start_iter * dt;
256  op_times_arr.push_back(cur_time);
257 
258  for (int iter = start_iter; iter < n_iter; iter++) {
259  cur_time += dt;
260  if ( ( (iter+1)%sim[0].solver.file_op_iter == 0)
261  && ( (iter+1) < n_iter) ) {
262  op_times_arr.push_back(cur_time);
263  }
264  }
265 
266  double t_final = n_iter*dt;
267  op_times_arr.push_back(t_final);
268  }
269 
270  double total_rom_predict_time = 0;
271  for (int iter = 0; iter < op_times_arr.size(); iter++) {
272 
273  double waqt = op_times_arr[iter];
274 
275  rom_interface.predict(sim, waqt);
276  if (!rank) printf( "libROM: Predicted solution at time %1.4e using ROM, wallclock time: %f.\n",
277  waqt, rom_interface.predictWallclockTime() );
278  total_rom_predict_time += rom_interface.predictWallclockTime();
279 
280  /* write the solution to file */
281  for (int ns = 0; ns < nsims; ns++) {
282  if (sim[ns].solver.PhysicsOutput) {
283  sim[ns].solver.PhysicsOutput( &(sim[ns].solver),
284  &(sim[ns].mpi),
285  waqt );
286  }
287  }
288  OutputSolution(sim, nsims, waqt);
289 
290  }
291 
292  /* calculate error if exact solution has been provided */
293  for (int ns = 0; ns < nsims; ns++) {
294  CalculateError(&(sim[ns].solver),
295  &(sim[ns].mpi) );
296  }
297 
298  if (!rank) {
299  printf( "libROM: total prediction/query wallclock time: %f (seconds).\n",
300  total_rom_predict_time );
301  }
302 
303  }
304 #endif
305 
306  return 0;
307 }