HyPar  1.0
Finite-Difference Hyperbolic-Parabolic PDE Solver on Cartesian Grids
InitializeSolvers.c File Reference

Initialize all solvers. More...

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <io.h>
#include <tridiagLU.h>
#include <timeintegration.h>
#include <interpolation.h>
#include <firstderivative.h>
#include <secondderivative.h>
#include <mpivars.h>
#include <simulation_object.h>
#include <Python.h>

Go to the source code of this file.

Functions

int ApplyBoundaryConditions (void *, void *, double *, double *, double)
 Applies the boundary conditions specified for each boundary zone. More...
 
int ApplyIBConditions (void *, void *, double *, double)
 
int HyperbolicFunction (double *, double *, void *, void *, double, int, int(*)(double *, double *, int, void *, double), int(*)(double *, double *, double *, double *, double *, double *, int, void *, double))
 
int ParabolicFunctionNC1Stage (double *, double *, void *, void *, double)
 
int ParabolicFunctionNC2Stage (double *, double *, void *, void *, double)
 
int ParabolicFunctionNC1_5Stage (double *, double *, void *, void *, double)
 
int ParabolicFunctionCons1Stage (double *, double *, void *, void *, double)
 
int SourceFunction (double *, double *, void *, void *, double)
 
int VolumeIntegral (double *, double *, void *, void *)
 
int BoundaryIntegral (void *, void *)
 
int CalculateConservationError (void *, void *)
 
void IncrementFilenameIndex (char *, int)
 
int NonLinearInterpolation (double *, void *, void *, double, int(*)(double *, double *, int, void *, double))
 
int gpuHyperbolicFunction (double *, double *, void *, void *, double, int, int(*)(double *, double *, int, void *, double), int(*)(double *, double *, double *, double *, double *, double *, int, void *, double))
 
int InitializeSolvers (void *s, int nsims)
 

Detailed Description

Initialize all solvers.

Author
Debojyoti Ghosh

Definition in file InitializeSolvers.c.

Function Documentation

int ApplyBoundaryConditions ( void *  s,
void *  m,
double *  x,
double *  xref,
double  waqt 
)

Applies the boundary conditions specified for each boundary zone.

The solver object (of type HyPar) contains an object of type DomainBoundary that contains all the boundary information (dimension, extent, face, type, etc.). This function iterates through each of the boundary zones (HyPar::boundary[HyPar::nBoundaryZones]) and calls the corresponding boundary condition function.

The variable flag indicates if the array x is the solution, or a delta-solution (from implicit time-integration methods).

Parameters
s - Object of type HyPar containing solver-related variables
m - Object of type MPIVariables containing MPI-related variables
x - The solution vector on which the boundary conditions are to be applied
xref - Reference solution vector, if needed
waqt - Current simulation time

Definition at line 28 of file ApplyBoundaryConditions.c.

34 {
35  HyPar *solver = (HyPar*) s;
36  DomainBoundary *boundary = (DomainBoundary*) solver->boundary;
37  MPIVariables *mpi = (MPIVariables*) m;
38  int nb = solver->nBoundaryZones;
39 
40  int* dim_local;
41 #if defined(HAVE_CUDA)
42  if (solver->use_gpu) {
43  dim_local = solver->gpu_dim_local;
44  } else {
45 #endif
46  dim_local = solver->dim_local;
47 #if defined(HAVE_CUDA)
48  }
49 #endif
50 
51  /* Apply domain boundary conditions to x */
52  int n;
53  for (n = 0; n < nb; n++) {
54  boundary[n].BCFunctionU(&boundary[n],mpi,solver->ndims,solver->nvars,
55  dim_local,solver->ghosts,x,waqt);
56  }
57 
58  return(0);
59 }
int ApplyIBConditions ( void *  s,
void *  m,
double *  x,
double  waqt 
)

Call the physics-specific function that applies the immersed boundary conditions on the immersed boundary points.

Parameters
s - Object of type HyPar containing solver-related variables
m - Object of type MPIVariables containing MPI-related variables
x - The solution vector to apply immersed BCs on
waqt - Current simulation time

Definition at line 14 of file ApplyIBConditions.c.

19 {
20  HyPar *solver = (HyPar*) s;
21  MPIVariables *mpi = (MPIVariables*) m;
22 
23  /* Apply immersed boundary conditions, if applicable */
24 #if defined(HAVE_CUDA)
25  if (solver->use_gpu) {
26  if (solver->flag_ib) {
27  fprintf(stderr, "ERROR: immersed boundaries have not yet been implemented on GPU.\n");
28  }
29  } else {
30 #endif
31  if (solver->flag_ib) solver->IBFunction(solver,mpi,x,waqt);
32 #if defined(HAVE_CUDA)
33  }
34 #endif
35 
36  return 0;
37 }
int HyperbolicFunction ( double *  hyp,
double *  u,
void *  s,
void *  m,
double  t,
int  LimFlag,
int(*)(double *, double *, int, void *, double)  FluxFunction,
int(*)(double *, double *, double *, double *, double *, double *, int, void *, double)  UpwindFunction 
)

This function computes the hyperbolic term of the governing equations, expressed as follows:

\begin{equation} {\bf F} \left({\bf u}\right) = \sum_{d=0}^{D-1} \frac {\partial {\bf f}_d\left({\bf u}\right)} {\partial x_d} \end{equation}

using a conservative finite-difference discretization in space:

\begin{equation} \hat{\bf F} \left({\bf u}\right) = \sum_{d=0}^{D-1} \frac {1} {\Delta x_d} \left[ \hat{\bf f}_{d,j+1/2} - \hat{\bf f}_{d,j-1/2} \right] \end{equation}

where \(d\) denotes the spatial dimension, \(D\) denotes the total number of spatial dimensions, the hat denotes the discretized quantity, and \(j\) is the grid coordinate along dimension \(d\). The approximations to the flux function \({\bf f}_d\) at the interfaces \(j\pm1/2\), denoted by \(\hat{\bf f}_{d,j\pm 1/2}\), are computed using the function ReconstructHyperbolic().
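
For instance, in one spatial dimension (\(D=1\)) with grid spacing \(\Delta x\), the discretized hyperbolic term at grid point \(j\) reduces to

\begin{equation} \hat{\bf F}_j = \frac {1} {\Delta x} \left[ \hat{\bf f}_{j+1/2} - \hat{\bf f}_{j-1/2} \right], \end{equation}

i.e., the difference of the reconstructed interface fluxes on either side of the point, scaled by the inverse grid spacing (the dxinv factor in the listing below).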

Parameters
hyp - Array to hold the computed hyperbolic term (shares the same layout as u)
u - Solution array
s - Solver object of type HyPar
m - MPI object of type MPIVariables
t - Current simulation time
LimFlag - Flag to indicate if the nonlinear coefficients for the solution-dependent interpolation method should be recomputed (see ReconstructHyperbolic() for an explanation of why this is needed)
FluxFunction - Function pointer to the flux function for the hyperbolic term
UpwindFunction - Function pointer to the upwinding function for the hyperbolic term

Definition at line 31 of file HyperbolicFunction.c.

45 {
46  HyPar *solver = (HyPar*) s;
47  MPIVariables *mpi = (MPIVariables*) m;
48  int d, v, i, done;
49  double *FluxI = solver->fluxI; /* interface flux */
50  double *FluxC = solver->fluxC; /* cell centered flux */
52 
53  int ndims = solver->ndims;
54  int nvars = solver->nvars;
55  int ghosts = solver->ghosts;
56  int *dim = solver->dim_local;
57  int size = solver->npoints_local_wghosts;
58  double *x = solver->x;
59  double *dxinv = solver->dxinv;
60  int index[ndims], index1[ndims], index2[ndims], dim_interface[ndims];
61 
62  LimFlag = (LimFlag && solver->flag_nonlinearinterp && solver->SetInterpLimiterVar);
63 
64  _ArraySetValue_(hyp,size*nvars,0.0);
65  _ArraySetValue_(solver->StageBoundaryIntegral,2*ndims*nvars,0.0);
66  if (!FluxFunction) return(0); /* zero hyperbolic term */
67  solver->count_hyp++;
68 
69 #if defined(CPU_STAT)
70  double cpu_time = 0.0;
71  clock_t cpu_start, cpu_end;
72 #endif
73 
74  int offset = 0;
75  for (d = 0; d < ndims; d++) {
76  _ArrayCopy1D_(dim,dim_interface,ndims); dim_interface[d]++;
77  int size_cellcenter = 1; for (i = 0; i < ndims; i++) size_cellcenter *= (dim[i] + 2*ghosts);
78  int size_interface = 1; for (i = 0; i < ndims; i++) size_interface *= dim_interface[i];
79 
80  /* evaluate cell-centered flux */
81  IERR FluxFunction(FluxC,u,d,solver,t); CHECKERR(ierr);
82  /* compute interface fluxes */
83  IERR ReconstructHyperbolic(FluxI,FluxC,u,x+offset,d,solver,mpi,t,LimFlag,UpwindFunction);
84  CHECKERR(ierr);
85 
86  /* calculate the first derivative */
87  done = 0; _ArraySetValue_(index,ndims,0);
88  int p, p1, p2;
89 
90 #if defined(CPU_STAT)
91  cpu_start = clock();
92 #endif
93 
94  while (!done) {
95  _ArrayCopy1D_(index,index1,ndims);
96  _ArrayCopy1D_(index,index2,ndims); index2[d]++;
97  _ArrayIndex1D_(ndims,dim ,index ,ghosts,p);
98  _ArrayIndex1D_(ndims,dim_interface,index1,0 ,p1);
99  _ArrayIndex1D_(ndims,dim_interface,index2,0 ,p2);
100  for (v=0; v<nvars; v++) hyp[nvars*p+v] += dxinv[offset+ghosts+index[d]]
101  * (FluxI[nvars*p2+v]-FluxI[nvars*p1+v]);
102  /* boundary flux integral */
103  if (index[d] == 0)
104  for (v=0; v<nvars; v++) solver->StageBoundaryIntegral[(2*d+0)*nvars+v] -= FluxI[nvars*p1+v];
105  if (index[d] == dim[d]-1)
106  for (v=0; v<nvars; v++) solver->StageBoundaryIntegral[(2*d+1)*nvars+v] += FluxI[nvars*p2+v];
107 
108  _ArrayIncrementIndex_(ndims,dim,index,done);
109  }
110 
111 #if defined(CPU_STAT)
112  cpu_end = clock();
113  cpu_time += (double)(cpu_end - cpu_start) / CLOCKS_PER_SEC;
114 #endif
115 
116  offset += dim[d] + 2*ghosts;
117  }
118 
119 #if defined(CPU_STAT)
120  printf("HyperbolicFunction CPU time = %8.6lf\n", cpu_time);
121 #endif
122 
123  if (solver->flag_ib) _ArrayBlockMultiply_(hyp,solver->iblank,size,nvars);
124 
125  return(0);
126 }
int ParabolicFunctionNC1Stage ( double *  par,
double *  u,
void *  s,
void *  m,
double  t 
)

Evaluate the parabolic term using a nonconservative 1-stage finite-difference spatial discretization: The parabolic term is assumed to be of the form:

\begin{equation} {\bf P}\left({\bf u}\right) = \sum_{d=0}^{D-1} \frac {\partial^2 {\bf g}_d\left(\bf u\right)} {\partial x_d^2}, \end{equation}

which is discretized at a grid point as:

\begin{equation} \left.{\bf P}\left({\bf u}\right)\right|_j = \sum_{d=0}^{D-1} \frac { \mathcal{L}_d \left[ {\bf g}_d \right] } {\Delta x_d^2}, \end{equation}

where \(d\) is the spatial dimension index, \(D\) is the total number of spatial dimensions (HyPar::ndims), and \(j\) is the grid index along \(d\). \(\mathcal{L}_d\) represents the finite-difference approximation to the Laplacian along the \(d\) spatial dimension, computed using HyPar::SecondDerivativePar.

Note: this form of the parabolic term does not allow for cross-derivatives.

To use this form of the parabolic term:

  • specify "par_space_type" in solver.inp as "nonconservative-1stage" (HyPar::spatial_type_par).
  • the physical model must specify \({\bf g}_d\left({\bf u}\right)\) through HyPar::GFunction.
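
As an illustration, here is a minimal sketch (not part of HyPar) of a physics-side diffusion function for the simple case \({\bf g}_d\left({\bf u}\right) = \nu {\bf u}\) (linear diffusion with a constant coefficient), written against the HyPar::GFunction signature shown on this page; the function name LinearDiffusionGFunction and the coefficient nu are assumptions for this example:

#include <hypar.h>

int LinearDiffusionGFunction(double *g, double *u, int dir, void *s, double t)
{
  HyPar *solver = (HyPar*) s;
  /* g and u are cell-centered arrays with npoints_local_wghosts points and nvars components */
  int size = solver->npoints_local_wghosts * solver->nvars;
  double nu = 0.01; /* assumed constant diffusion coefficient */
  int i;
  /* the same diffusion function is used along every spatial dimension, so dir and t are unused here */
  for (i = 0; i < size; i++) g[i] = nu * u[i];
  return 0;
}

With this choice, the parabolic term computed by ParabolicFunctionNC1Stage() is \(\nu \sum_d \partial^2 {\bf u} / \partial x_d^2\).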

Parameters
par - Array to hold the computed parabolic term
u - Solution
s - Solver object of type HyPar
m - MPI object of type MPIVariables
t - Current simulation time

Definition at line 31 of file ParabolicFunctionNC1Stage.c.

38 {
39  HyPar *solver = (HyPar*) s;
40  MPIVariables *mpi = (MPIVariables*) m;
41  double *Func = solver->fluxC;
42  double *Deriv2 = solver->Deriv2;
43  int d, v, i, done;
45 
46  int ndims = solver->ndims;
47  int nvars = solver->nvars;
48  int ghosts = solver->ghosts;
49  int *dim = solver->dim_local;
50  double *dxinv = solver->dxinv;
51  int size = solver->npoints_local_wghosts;
52 
53  if (!solver->GFunction) return(0); /* zero parabolic terms */
54  solver->count_par++;
55 
56  int index[ndims];
57  _ArraySetValue_(par,size*nvars,0.0);
58 
59  int offset = 0;
60  for (d = 0; d < ndims; d++) {
61  int size_deriv = 1; for (i=0; i<ndims; i++) size_deriv *= dim[i];
62 
63  /* calculate the diffusion function */
64  IERR solver->GFunction(Func,u,d,solver,t); CHECKERR(ierr);
65  IERR solver->SecondDerivativePar(Deriv2,Func,d,solver,mpi); CHECKERR(ierr);
66 
67  /* calculate the final term - second derivative of the diffusion function */
68  done = 0; _ArraySetValue_(index,ndims,0);
69  int p;
70  while (!done) {
71  _ArrayIndex1D_(ndims,dim,index,ghosts,p);
72  for (v=0; v<nvars; v++)
73  par[nvars*p+v] += ( dxinv[offset+ghosts+index[d]]*dxinv[offset+ghosts+index[d]]
74  * Deriv2[nvars*p+v] );
75  _ArrayIncrementIndex_(ndims,dim,index,done);
76  }
77 
78  offset += dim[d] + 2*ghosts;
79  }
80 
81  if (solver->flag_ib) _ArrayBlockMultiply_(par,solver->iblank,size,nvars);
82  return(0);
83 }
int ParabolicFunctionNC2Stage ( double *  par,
double *  u,
void *  s,
void *  m,
double  t 
)

Evaluate the parabolic term using a nonconservative 2-stage finite-difference spatial discretization: The parabolic term is assumed to be of the form:

\begin{equation} {\bf P}\left({\bf u}\right) = \sum_{d1=0}^{D-1}\sum_{d2=0}^{D-1} \frac {\partial^2 h_{d1,d2}\left(\bf u\right)} {\partial x_{d1} \partial x_{d2}}, \end{equation}

where \(d1\) and \(d2\) are spatial dimension indices, and \(D\) is the total number of spatial dimensions (HyPar::ndims). This term is discretized at a grid point as:

\begin{equation} \left.{\bf P}\left({\bf u}\right)\right|_j = \sum_{d1=0}^{D-1} \sum_{d2=0}^{D-1} \frac { \mathcal{D}_{d1}\mathcal{D}_{d2} \left[ {\bf h}_{d1,d2} \right] } {\Delta x_{d1} \Delta x_{d2}}, \end{equation}

where \(\mathcal{D}\) denotes the finite-difference approximation to the first derivative. Each of the first derivative approximations \(\mathcal{D}_{d1}\) and \(\mathcal{D}_{d2}\) is computed separately, and thus the cross-derivative is evaluated in two steps using HyPar::FirstDerivativePar.

Notes:

  • This form of the parabolic term does allow for cross-derivatives ( \( d1 \ne d2 \)).
  • An \(n\)-th order central approximation to the second derivative can be expressed as a conjugation of two \((n-1)\)-th order approximations to the first derivative, one forward and one backward (see the worked stencil below). Computing it this way avoids odd-even decoupling. Thus, where possible, HyPar::FirstDerivativePar should point to the function computing the \((n-1)\)-th order first derivative, where \(n\) is the desired order. Currently, this is implemented only for \(n=2\). For other values of \(n\), the first derivative is also computed with an \(n\)-th order approximation.
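
For example, in one dimension with \(n=2\), composing a first-order forward difference \(\mathcal{D}^+\) with a first-order backward difference \(\mathcal{D}^-\) recovers the standard three-point stencil for the second derivative:

\begin{equation} \mathcal{D}^+\left[\mathcal{D}^- u\right]_j = \frac{1}{\Delta x} \left[ \frac{u_{j+1}-u_j}{\Delta x} - \frac{u_j-u_{j-1}}{\Delta x} \right] = \frac{u_{j+1} - 2u_j + u_{j-1}}{\Delta x^2}, \end{equation}

which couples \(u_{j-1}\), \(u_j\), and \(u_{j+1}\) and therefore does not suffer from odd-even decoupling.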

To use this form of the parabolic term:

  • specify "par_space_type" in solver.inp as "nonconservative-2stage" (HyPar::spatial_type_par).
  • the physical model must specify \({\bf h}_{d1,d2}\left({\bf u}\right)\) through HyPar::HFunction.

See Also
ParabolicFunctionNC1_5Stage()
Parameters
par - Array to hold the computed parabolic term
u - Solution
s - Solver object of type HyPar
m - MPI object of type MPIVariables
t - Current simulation time

Definition at line 40 of file ParabolicFunctionNC2Stage.c.

47 {
48  HyPar *solver = (HyPar*) s;
49  MPIVariables *mpi = (MPIVariables*) m;
50  double *Func = solver->fluxC;
51  double *Deriv1 = solver->Deriv1;
52  double *Deriv2 = solver->Deriv2;
53  int d, d1, d2, v, p, done;
54  double dxinv1, dxinv2;
56 
57  int ndims = solver->ndims;
58  int nvars = solver->nvars;
59  int ghosts = solver->ghosts;
60  int *dim = solver->dim_local;
61  double *dxinv = solver->dxinv;
62  int size = solver->npoints_local_wghosts;
63 
65 
66  if (!solver->HFunction) return(0); /* zero parabolic terms */
67  solver->count_par++;
68 
69  int index[ndims];
70  _ArraySetValue_(par,size*nvars,0.0);
71 
72  for (d1 = 0; d1 < ndims; d1++) {
73  for (d2 = 0; d2 < ndims; d2++) {
74 
75  /* calculate the diffusion function */
76  IERR solver->HFunction(Func,u,d1,d2,solver,t); CHECKERR(ierr);
77  IERR solver->FirstDerivativePar(Deriv1,Func ,d1, 1,solver,mpi); CHECKERR(ierr);
78  IERR MPIExchangeBoundariesnD(ndims,nvars,dim,ghosts,mpi,Deriv1); CHECKERR(ierr);
79  IERR solver->FirstDerivativePar(Deriv2,Deriv1,d2,-1,solver,mpi); CHECKERR(ierr);
80 
81  /* calculate the final term - second derivative of the diffusion function */
82  done = 0; _ArraySetValue_(index,ndims,0);
83  while (!done) {
84  _ArrayIndex1D_(ndims,dim,index,ghosts,p);
85  _GetCoordinate_(d1,index[d1],dim,ghosts,dxinv,dxinv1);
86  _GetCoordinate_(d2,index[d2],dim,ghosts,dxinv,dxinv2);
87  for (v=0; v<nvars; v++) par[nvars*p+v] += (dxinv1*dxinv2 * Deriv2[nvars*p+v]);
88  _ArrayIncrementIndex_(ndims,dim,index,done);
89  }
90 
91  }
92  }
93 
94  if (solver->flag_ib) _ArrayBlockMultiply_(par,solver->iblank,size,nvars);
95  return(0);
96 }
int ParabolicFunctionNC1_5Stage ( double *  par,
double *  u,
void *  s,
void *  m,
double  t 
)

Evaluate the parabolic term using a "1.5"-stage finite-difference spatial discretization: The parabolic term is assumed to be of the form:

\begin{equation} {\bf P}\left({\bf u}\right) = \sum_{d1=0}^{D-1}\sum_{d2=0}^{D-1} \frac {\partial^2 h_{d1,d2}\left(\bf u\right)} {\partial x_{d1} \partial x_{d2}}, \end{equation}

where \(d1\) and \(d2\) are spatial dimension indices, and \(D\) is the total number of spatial dimensions (HyPar::ndims). This term is discretized at a grid point as:

\begin{equation} \left.{\bf P}\left({\bf u}\right)\right|_j = \sum_{d1=0}^{D-1} \sum_{d2=0}^{D-1} \frac { \mathcal{D}_{d1}\mathcal{D}_{d2} \left[ {\bf h}_{d1,d2} \right] } {\Delta x_{d1} \Delta x_{d2}}, \end{equation}

where \(\mathcal{D}\) denotes the finite-difference approximation to the first derivative.

  • When \(d = d1 = d2\), the operator \(\mathcal{D}^2_d\left[\cdot\right] = \mathcal{D}_{d}\mathcal{D}_{d}\left[\cdot\right]\) is computed in one step as the finite-difference approximation to the second derivative (Laplacian) \(\mathcal{L}_d\left[\cdot\right]\) using HyPar::SecondDerivativePar.
  • When \(d1 \ne d2 \), each of the first derivative approximations \(\mathcal{D}_{d1}\) and \(\mathcal{D}_{d2}\) is computed separately, and thus the cross-derivative is evaluated in two steps using HyPar::FirstDerivativePar.

Note: this form of the parabolic term does allow for cross-derivatives ( \( d1 \ne d2 \)).

To use this form of the parabolic term:

  • specify "par_space_type" in solver.inp as "nonconservative-1.5stage" (HyPar::spatial_type_par).
  • the physical model must specify \({\bf h}_{d1,d2}\left({\bf u}\right)\) through HyPar::HFunction.
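
For reference, a minimal sketch of the corresponding solver.inp entry (assuming the usual begin/end keyword-value layout of that file; all other keywords are omitted):

begin
    par_space_type    nonconservative-1.5stage
end
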
See Also
ParabolicFunctionNC2Stage()
Parameters
par - Array to hold the computed parabolic term
u - Solution
s - Solver object of type HyPar
m - MPI object of type MPIVariables
t - Current simulation time

Definition at line 35 of file ParabolicFunctionNC1.5Stage.c.

42 {
43  HyPar *solver = (HyPar*) s;
44  MPIVariables *mpi = (MPIVariables*) m;
45  double *Func = solver->fluxC;
46  double *Deriv1 = solver->Deriv1;
47  double *Deriv2 = solver->Deriv2;
48  int d, d1, d2, v, p, done;
49  double dxinv1, dxinv2;
51 
52  int ndims = solver->ndims;
53  int nvars = solver->nvars;
54  int ghosts = solver->ghosts;
55  int *dim = solver->dim_local;
56  double *dxinv = solver->dxinv;
57  int size = solver->npoints_local_wghosts;
58 
59  if (!solver->HFunction) return(0); /* zero parabolic terms */
60  solver->count_par++;
61 
62  int index[ndims];
63  _ArraySetValue_(par,size*nvars,0.0);
64 
65  for (d1 = 0; d1 < ndims; d1++) {
66  for (d2 = 0; d2 < ndims; d2++) {
67 
68  /* calculate the diffusion function */
69  IERR solver->HFunction(Func,u,d1,d2,solver,t); CHECKERR(ierr);
70  if (d1 == d2) {
71  /* second derivative with respect to the same independent variable */
72  IERR solver->SecondDerivativePar(Deriv2,Func,d1,solver,mpi); CHECKERR(ierr);
73  } else {
74  /* cross derivative */
75  IERR solver->FirstDerivativePar(Deriv1,Func ,d1, 1,solver,mpi); CHECKERR(ierr);
76  IERR MPIExchangeBoundariesnD(ndims,nvars,dim,ghosts,mpi,Deriv1); CHECKERR(ierr);
77  IERR solver->FirstDerivativePar(Deriv2,Deriv1,d2,-1,solver,mpi); CHECKERR(ierr);
78  }
79 
80  /* calculate the final term - second derivative of the diffusion function */
81  done = 0; _ArraySetValue_(index,ndims,0);
82  while (!done) {
83  _ArrayIndex1D_(ndims,dim,index,ghosts,p);
84  _GetCoordinate_(d1,index[d1],dim,ghosts,dxinv,dxinv1);
85  _GetCoordinate_(d2,index[d2],dim,ghosts,dxinv,dxinv2);
86  for (v=0; v<nvars; v++) par[nvars*p+v] += (dxinv1*dxinv2 * Deriv2[nvars*p+v]);
87  _ArrayIncrementIndex_(ndims,dim,index,done);
88  }
89 
90  }
91  }
92 
93  if (solver->flag_ib) _ArrayBlockMultiply_(par,solver->iblank,size,nvars);
94  return(0);
95 }
int ParabolicFunctionCons1Stage ( double *  par,
double *  u,
void *  s,
void *  m,
double  t 
)

Evaluate the parabolic term using a conservative finite-difference spatial discretization: The parabolic term is assumed to be of the form:

\begin{equation} {\bf P}\left({\bf u}\right) = \sum_{d=0}^{D-1} \frac {\partial^2 {\bf g}_d\left(\bf u\right)} {\partial x_d^2}, \end{equation}

which is discretized at a grid point as:

\begin{equation} \left.{\bf P}\left({\bf u}\right)\right|_j = \sum_{d=0}^{D-1} \frac { \hat{\bf g}_{j+1/2} - \hat{\bf g}_{j-1/2} } {\Delta x_d^2}, \end{equation}

where \(d\) is the spatial dimension index, \(D\) is the total number of spatial dimensions (HyPar::ndims), and \(j\) is the grid index along \(d\). \(\hat{\bf g}_d\) is the numerical approximation to the second primitive of \({\bf g}_d\left({\bf u}\right)\), computed using HyPar::InterpolateInterfacesPar.

Note: this form of the parabolic term does not allow for cross-derivatives.

To use this form of the parabolic term:

  • specify "par_space_type" in solver.inp as "conservative-1stage" (HyPar::spatial_type_par).
  • the physical model must specify \({\bf g}_d\left({\bf u}\right)\) through HyPar::GFunction.

Reference: Liu, Y., Shu, C.-W., Zhang, M., "High order finite difference WENO schemes for nonlinear degenerate parabolic equations", SIAM J. Sci. Comput., 33 (2), 2011, pp. 939-965, http://dx.doi.org/10.1137/100791002

Parameters
par - Array to hold the computed parabolic term
u - Solution
s - Solver object of type HyPar
m - MPI object of type MPIVariables
t - Current simulation time

Definition at line 34 of file ParabolicFunctionCons1Stage.c.

41 {
42  HyPar *solver = (HyPar*) s;
43  MPIVariables *mpi = (MPIVariables*) m;
44  double *FluxI = solver->fluxI; /* interface flux array */
45  double *Func = solver->fluxC; /* diffusion function array */
46  int d, v, i, done;
48 
49  int ndims = solver->ndims;
50  int nvars = solver->nvars;
51  int ghosts = solver->ghosts;
52  int *dim = solver->dim_local;
53  double *dxinv = solver->dxinv;
54  int size = solver->npoints_local_wghosts;
55 
56  int index[ndims], index1[ndims], index2[ndims], dim_interface[ndims];
57 
58  _ArraySetValue_(par,size*nvars,0.0);
59  if (!solver->GFunction) return(0); /* zero parabolic term */
60  solver->count_par++;
61 
62  int offset = 0;
63  for (d = 0; d < ndims; d++) {
64  _ArrayCopy1D_(dim,dim_interface,ndims); dim_interface[d]++;
65  int size_cellcenter = 1; for (i = 0; i < ndims; i++) size_cellcenter *= (dim[i] + 2*ghosts);
66  int size_interface = 1; for (i = 0; i < ndims; i++) size_interface *= dim_interface[i];
67 
68  /* evaluate cell-centered flux */
69  IERR solver->GFunction(Func,u,d,solver,t); CHECKERR(ierr);
70  /* compute interface fluxes */
71  IERR solver->InterpolateInterfacesPar(FluxI,Func,d,solver,mpi); CHECKERR(ierr);
72 
73  /* calculate the second derivative */
74  done = 0; _ArraySetValue_(index,ndims,0);
75  int p, p1, p2;
76  while (!done) {
77  _ArrayCopy1D_(index,index1,ndims);
78  _ArrayCopy1D_(index,index2,ndims); index2[d]++;
79  _ArrayIndex1D_(ndims,dim ,index ,ghosts,p);
80  _ArrayIndex1D_(ndims,dim_interface,index1,0 ,p1);
81  _ArrayIndex1D_(ndims,dim_interface,index2,0 ,p2);
82  for (v=0; v<nvars; v++)
83  par[nvars*p+v] += ((dxinv[offset+ghosts+index[d]] * dxinv[offset+ghosts+index[d]])
84  * (FluxI[nvars*p2+v] - FluxI[nvars*p1+v]));
85  _ArrayIncrementIndex_(ndims,dim,index,done);
86  }
87 
88  offset += dim[d] + 2*ghosts;
89  }
90 
91  if (solver->flag_ib) _ArrayBlockMultiply_(par,solver->iblank,size,nvars);
92  return(0);
93 }
int SourceFunction ( double *  source,
double *  u,
void *  s,
void *  m,
double  t 
)

Evaluate the source term \({\bf S}\left({\bf u}\right)\) in the governing equation, if the physical model specifies one. In addition, if the simulation requires a sponge boundary treatment, the sponge BC function is called.

Parameters
source - The computed source term
u - Solution
s - Solver object of type HyPar
m - MPI object of type MPIVariables
t - Current simulation time

Definition at line 22 of file SourceFunction.c.

29 {
30  HyPar *solver = (HyPar*) s;
31  MPIVariables *mpi = (MPIVariables*) m;
32 
33  /* initialize to zero */
34  int size = solver->ndof_cells_wghosts;
35 #if defined(HAVE_CUDA)
36  if (solver->use_gpu) {
37  gpuArraySetValue(source,size, 0.0);
38  } else {
39 #endif
40  _ArraySetValue_(source,size,0.0);
41 #if defined(HAVE_CUDA)
42  }
43 #endif
44 
45  /* call the source function of the physics model, if available */
46  if (solver->SFunction) {
47  solver->SFunction(source,u,solver,mpi,t);
48  solver->count_sou++;
49  }
50 
51  /* Apart from other source terms, implement sponge BC as a source */
52  DomainBoundary* boundary = (DomainBoundary*) solver->boundary;
53  int n;
54  int nb = solver->nBoundaryZones;
55 #if defined(HAVE_CUDA)
56  if (solver->use_gpu) {
57  for (n = 0; n < nb; n++) {
58  if (!strcmp(boundary[n].bctype,_SPONGE_)) {
59  fprintf(stderr,"ERROR: Sponge BC not yet implemented on GPU.\n");
60  }
61  }
62  } else {
63 #endif
64  for (n = 0; n < nb; n++) {
65  if (!strcmp(boundary[n].bctype,_SPONGE_)) {
66  BCSpongeSource( &boundary[n],
67  solver->ndims,
68  solver->nvars,
69  solver->ghosts,
70  solver->dim_local,
71  solver->x,
72  u,
73  source );
74  }
75  }
76 #if defined(HAVE_CUDA)
77  }
78 #endif
79 
80 
81  return(0);
82 }
int VolumeIntegral ( double *  VolumeIntegral,
double *  u,
void *  s,
void *  m 
)

Compute the volume integral of the solution.
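
On a Cartesian grid this is the discrete sum

\begin{equation} \int_\Omega u_v \, d\Omega \approx \sum_j u_{v,j} \prod_{d=0}^{D-1} \Delta x_d \end{equation}

for each solution component \(v\), where the cell volume is obtained from HyPar::dxinv; the per-rank sums are then added over all MPI ranks (see the listing below).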

Parameters
VolumeIntegral - The computed volume integral
u - Solution
s - Solver object of type HyPar
m - MPI object of type MPIVariables

Definition at line 14 of file VolumeIntegral.c.

20 {
21  HyPar *solver = (HyPar*) s;
22  MPIVariables *mpi = (MPIVariables*) m;
23 
24  int ndims = solver->ndims;
25  int nvars = solver->nvars;
26  int *dim = solver->dim_local;
27  int ghosts = solver->ghosts;
28  int d,v;
29 
30  /* calculate local volume integral of the solution */
31  int index[ndims], done = 0;
32  double *local_integral = (double*) calloc (nvars,sizeof(double));
33  _ArraySetValue_(local_integral,nvars,0.0);
34  _ArraySetValue_(index,ndims,0);
35  while (!done) {
36  int p; _ArrayIndex1D_(ndims,dim,index,ghosts,p);
37  double dxinv[ndims];
38  for (d=0; d<ndims; d++) { _GetCoordinate_(d,index[d],dim,ghosts,solver->dxinv,dxinv[d]); }
39  double dV = 1.0; for (d=0; d<ndims; d++) dV *= (1.0/dxinv[d]);
40  for (v=0; v<nvars; v++) local_integral[v] += (u[p*nvars+v]*dV);
41  _ArrayIncrementIndex_(ndims,dim,index,done);
42  }
43  /* sum over all processors to get global integral of the solution */
44  IERR MPISum_double(VolumeIntegral,local_integral,nvars,&mpi->world); CHECKERR(ierr);
45  free(local_integral);
46 
47  return(0);
48 }
int BoundaryIntegral ( void *  s,
void *  m 
)

Computes the flux integral over the boundary. The local flux integral (on this processor) is computed for physical as well as MPI boundaries. The global boundary integral is computed by summing the local integrals over all the processors, since the contributions from the MPI boundaries cancel out.
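
In each spatial dimension \(d\), the flux accumulated on the two domain faces (HyPar::StepBoundaryIntegral) is multiplied by the face area element

\begin{equation} dS = \prod_{k \ne d} \Delta x_k, \end{equation}

computed assuming a uniform grid (the spacing is sampled at the domain center in the listing below), and the per-rank contributions are then summed with MPISum_double().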

Parameters
s - Solver object of type HyPar
m - MPI object of type MPIVariables

Definition at line 17 of file BoundaryIntegral.c.

21 {
22  HyPar *solver = (HyPar*) s;
23  MPIVariables *mpi = (MPIVariables*) m;
24 
25  int ndims = solver->ndims;
26  int nvars = solver->nvars;
27  int *dim = solver->dim_local;
28  int ghosts = solver->ghosts;
29  int d,v,k;
30 
31  double *local_integral = (double*) calloc (nvars,sizeof(double));
32  double *global_integral = (double*) calloc (nvars,sizeof(double));
33 
34  /* calculate the local boundary integral on each process */
35  _ArraySetValue_(local_integral,nvars,0.0);
36  for (d=0; d<ndims; d++) {
37  for (v=0; v<nvars; v++) {
38  double dxinv[ndims], dS = 1.0;
39  for (k=0; k<ndims; k++) {
40  /* uniform grid assumed */
41  _GetCoordinate_(k,dim[k]/2,dim,ghosts,solver->dxinv,dxinv[k]);
42  }
43  for (k=0; k<ndims; k++) if (k!=d) dS *= (1.0/dxinv[k]);
44  local_integral[v] += (solver->StepBoundaryIntegral[(2*d+0)*nvars+v])*dS;
45  local_integral[v] += (solver->StepBoundaryIntegral[(2*d+1)*nvars+v])*dS;
46  }
47  }
48 
49  /* add across process to calculate global boundary integral
50  * (internal (MPI) boundaries must cancel out)
51  */
52  IERR MPISum_double(global_integral,local_integral,nvars,&mpi->world); CHECKERR(ierr);
53 
54  /* add to the total boundary integral */
55  _ArrayAXPY_(global_integral,1.0,solver->TotalBoundaryIntegral,nvars);
56 
57  free(local_integral);
58  free(global_integral);
59  return(0);
60 }
int CalculateConservationError ( void *  s,
void *  m 
)

Calculates the error (L2) in conservation by computing the difference between the initial volume integral of the solution, and the sum of the current volume integral and the time integral of the boundary flux from the start of the simulation to the current simulation time.
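
In terms of the stored integrals, the error for each solution component \(v\) is

\begin{equation} E_v = \frac{\left| V_v + B_v - V^0_v \right|} {\max\left(1, \left|V^0_v\right|\right)}, \end{equation}

where \(V_v\) is HyPar::VolumeIntegral, \(B_v\) is HyPar::TotalBoundaryIntegral, and \(V^0_v\) is HyPar::VolumeIntegralInitial (see the listing below).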

Parameters
s - Solver object of type HyPar
m - MPI object of type MPIVariables

Definition at line 16 of file CalculateConservationError.c.

20 {
21  HyPar *solver = (HyPar*) s;
22  int v,nvars = solver->nvars;
23  double error;
24 
25  double base[nvars];
26  for (v=0; v<nvars; v++) {
27  if (absolute(solver->VolumeIntegralInitial[v]) > 1.0)
28  base[v] = absolute(solver->VolumeIntegralInitial[v]);
29  else base[v] = 1.0;
30  }
31 
32  for (v=0; v<nvars; v++) {
33  error = (solver->VolumeIntegral[v]+solver->TotalBoundaryIntegral[v]-solver->VolumeIntegralInitial[v])
34  * (solver->VolumeIntegral[v]+solver->TotalBoundaryIntegral[v]-solver->VolumeIntegralInitial[v]);
35  solver->ConservationError[v] = sqrt(error)/base[v];
36  }
37 
38  return(0);
39 }
void IncrementFilenameIndex ( char *  f,
int  len 
)
Increment the output filename of the form op_nnnnn.dat by 1, i.e., the number represented by the string nnnnn is incremented by 1.

Increment a character string representing an integer by 1. For example: "00002" -> "00003"; "3421934" -> "3421935"; "999" -> "000". The string can be of arbitrary length.
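
A minimal usage sketch (the filename and the offsets are assumptions for illustration), advancing the numeric part of an output filename in place:

char fname[] = "op_00042.dat";
/* increment the 5-character index "00042" to "00043" */
IncrementFilenameIndex(fname + 3, 5);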

Parameters
f - Character string representing the integer
len - Length of the string

Definition at line 46 of file IncrementFilename.c.

50 {
51  int i;
52  for (i=len-1; i>=0; i--) {
53  if (f[i] == '9') {
54  f[i] = '0';
55  if (!i) fprintf(stderr,"Warning: file increment hit max limit. Resetting to zero.\n");
56  } else {
57  f[i]++;
58  break;
59  }
60  }
61 }
int NonLinearInterpolation ( double *  u,
void *  s,
void *  m,
double  t,
int(*)(double *, double *, int, void *, double)  FluxFunction 
)

Compute the interpolation coefficients of a nonlinear interpolation method, based on the smoothness of the flux function of the solution passed as an argument. This function is provided separately so that these coefficients can be pre-computed and stored for future use.

In the implementation of non-linear methods (such as WENO5), the calculation of the non-linear coefficients, and the actual evaluation of the interpolant are separated into different functions. This provides flexibility on when the nonlinear coefficients are evaluated. Some scenarios are as follows:

  • For explicit time integration, it will be computed every time the hyperbolic flux term is being computed.
  • For implicit time integration, consistency or linearization may require that the coefficients be computed and "frozen" at the beginning of each stage. Thus, this function can be called at the beginning of each time integration stage, while HyperbolicFunction() is called with the argument LimFlag = 0.
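
A hedged sketch of the second scenario (assuming the caller has already set up solver, mpi, u, hyp, t, FluxFunction, and UpwindFunction): the nonlinear coefficients are frozen at the start of the stage, and the subsequent hyperbolic-term evaluation reuses them by passing LimFlag = 0.

/* compute and store the nonlinear interpolation coefficients for this stage */
NonLinearInterpolation(u, solver, mpi, t, FluxFunction);
/* evaluate the hyperbolic term without recomputing the coefficients */
HyperbolicFunction(hyp, u, solver, mpi, t, 0 /*LimFlag*/, FluxFunction, UpwindFunction);
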
Parameters
u - Solution array
s - Solver object of type HyPar
m - MPI object of type MPIVariables
t - Current solution time
FluxFunction - The flux function \({\bf f}_d\left({\bf u}\right)\), whose properties (typically smoothness) are used to evaluate the nonlinear coefficients

Definition at line 28 of file NonLinearInterpolation.c.

38 {
39  HyPar *solver = (HyPar*) s;
40  MPIVariables *mpi = (MPIVariables*) m;
41  double *FluxC = solver->fluxC; /* cell centered flux */
43 
44  int flag = (FluxFunction && solver->flag_nonlinearinterp && solver->SetInterpLimiterVar);
45  if (flag) {;
46  int ndims = solver->ndims;
47  int nvars = solver->nvars;
48  int ghosts = solver->ghosts;
49  int *dim = solver->dim_local;
50  double *x = solver->x;
51 
52  int size = 1, d;
53  for (d=0; d<ndims; d++) size *= (dim[d] + 2*ghosts);
54 
55  /* apply boundary conditions and exchange data over MPI interfaces */
56  IERR solver->ApplyBoundaryConditions(solver,mpi,u,NULL,t); CHECKERR(ierr);
57  IERR solver->ApplyIBConditions(solver,mpi,u,t); CHECKERR(ierr);
58  IERR MPIExchangeBoundariesnD(ndims,nvars,dim,ghosts,mpi,u); CHECKERR(ierr);
59 
60  int offset = 0;
61  for (d = 0; d < ndims; d++) {
62  /* evaluate cell-centered flux */
63  IERR FluxFunction(FluxC,u,d,solver,t); CHECKERR(ierr);
64  /* calculate non-linear interpolation coefficients */
65  IERR solver->SetInterpLimiterVar(FluxC,u,x+offset,d,solver,mpi);CHECKERR(ierr);
66  offset += dim[d] + 2*ghosts;
67  }
68  }
69 
70  return(0);
71 }
int gpuHyperbolicFunction ( double *  hyp,
double *  u,
void *  s,
void *  m,
double  t,
int  LimFlag,
int(*)(double *, double *, int, void *, double)  FluxFunction,
int(*)(double *, double *, double *, double *, double *, double *, int, void *, double)  UpwindFunction 
)

This function computes the hyperbolic term of the governing equations, expressed as follows:

\begin{equation} {\bf F} \left({\bf u}\right) = \sum_{d=0}^{D-1} \frac {\partial {\bf f}_d\left({\bf u}\right)} {\partial x_d} \end{equation}

using a conservative finite-difference discretization in space:

\begin{equation} \hat{\bf F} \left({\bf u}\right) = \sum_{d=0}^{D-1} \frac {1} {\Delta x_d} \left[ \hat{\bf f}_{d,j+1/2} - \hat{\bf f}_{d,j-1/2} \right] \end{equation}

where \(d\) denotes the spatial dimension, \(D\) denotes the total number of spatial dimensions, the hat denotes the discretized quantity, and \(j\) is the grid coordinate along dimension \(d\). The approximations to the flux function \({\bf f}_d\) at the interfaces \(j\pm1/2\), denoted by \(\hat{\bf f}_{d,j\pm 1/2}\), are computed using the function ReconstructHyperbolic().

Parameters
hyp - Array to hold the computed hyperbolic term (shares the same layout as u)
u - Solution array
s - Solver object of type HyPar
m - MPI object of type MPIVariables
t - Current simulation time
LimFlag - Flag to indicate if the nonlinear coefficients for the solution-dependent interpolation method should be recomputed (see ReconstructHyperbolic() for an explanation of why this is needed)
FluxFunction - Function pointer to the flux function for the hyperbolic term
UpwindFunction - Function pointer to the upwinding function for the hyperbolic term

Definition at line 473 of file HyperbolicFunction_GPU.cu.

488 {
489  HyPar *solver = (HyPar*) s;
490  MPIVariables *mpi = (MPIVariables*) m;
491  int d;
492  double *FluxI = solver->fluxI; /* interface flux */
493  double *FluxC = solver->fluxC; /* cell centered flux */
494 
495  int ndims = solver->ndims;
496  int nvars = solver->nvars;
497  int ghosts = solver->ghosts;
498  int *dim = solver->dim_local;
499  int size = solver->npoints_local_wghosts;
500  double *x = solver->gpu_x;
501  double *dxinv = solver->gpu_dxinv;
502  double *FluxI_p1, *FluxI_p2;
503 
504  LimFlag = (LimFlag && solver->flag_nonlinearinterp && solver->SetInterpLimiterVar);
505 
506  gpuMemset(hyp, 0, size*nvars*sizeof(double));
507  gpuMemset(solver->StageBoundaryBuffer, 0, solver->StageBoundaryBuffer_size*sizeof(double));
508  gpuMemset(solver->StageBoundaryIntegral, 0, 2*ndims*nvars*sizeof(double));
509 
510  if (!FluxFunction) return(0); /* zero hyperbolic term */
511  /*solver->count_hyp++;*/
512 
513  int npoints_grid = 1; for (d = 0; d < ndims; d++) npoints_grid *= dim[d];
514  int nblocks = (npoints_grid - 1) / GPU_THREADS_PER_BLOCK + 1;
515  int offset = 0;
516 
517 #if defined(GPU_STAT)
518  cudaEvent_t start, stop;
519  float milliseconds = 0;
520 
521  checkCuda( cudaEventCreate(&start) );
522  checkCuda( cudaEventCreate(&stop) );
523 #endif
524 
525  for (d = 0; d < ndims; d++) {
526  int npoints_dim_interface = 1; for (int i=0; i<ndims; i++) npoints_dim_interface *= (i==d) ? (dim[i]+1) : dim[i];
527  /* evaluate cell-centered flux */
528  FluxFunction(FluxC,u,d,solver,t);
529  /* compute interface fluxes */
530  ReconstructHyperbolic(FluxI,FluxC,u,x+offset,d,solver,mpi,t,LimFlag,UpwindFunction);
531 
532  FluxI_p1 = solver->StageBoundaryBuffer+(solver->gpu_npoints_boundary_offset[d]*nvars);
533  FluxI_p2 = solver->StageBoundaryBuffer+(solver->gpu_npoints_boundary_offset[d]+solver->gpu_npoints_boundary[d])*nvars;
534 
535 #if defined(GPU_STAT)
536  checkCuda( cudaEventRecord(start, 0) );
537 #endif
538  if (ndims == 3) {
539  HyperbolicFunction_dim3_kernel<<<nblocks, GPU_THREADS_PER_BLOCK>>>(
540  npoints_grid, size, npoints_dim_interface, d, nvars, ghosts, offset,
541  solver->gpu_dim_local, FluxI, dxinv,
542  hyp, FluxI_p1, FluxI_p2
543  );
544  } else {
545  fprintf(stderr,"gpuHyperbolicFunction(): Not implemented for ndims = %d!\n", ndims);
546  exit(1);
547  }
548  cudaDeviceSynchronize();
549 
550 #if defined(GPU_STAT)
551  checkCuda( cudaEventRecord(stop, 0) );
552  checkCuda( cudaEventSynchronize(stop) );
553  checkCuda( cudaEventElapsedTime(&milliseconds, start, stop) );
554  printf("%-50s GPU time = %.6f dir = %d bandwidth (GB/s) = %.2f\n",
555  "HyperbolicFunction", milliseconds*1e-3, d, (1e-6*2*npoints_grid*nvars*sizeof(double))/milliseconds);
556 
557  checkCuda( cudaEventRecord(start, 0) );
558 #endif
559 
560  StageBoundaryIntegral_kernel<GPU_THREADS_PER_BLOCK><<<1, GPU_THREADS_PER_BLOCK, GPU_THREADS_PER_BLOCK*nvars*sizeof(double)>>>(
561  solver->gpu_npoints_boundary[d], nvars, -1, FluxI_p1,
562  solver->StageBoundaryIntegral + 2*d*nvars
563  );
564  StageBoundaryIntegral_kernel<GPU_THREADS_PER_BLOCK><<<1, GPU_THREADS_PER_BLOCK, GPU_THREADS_PER_BLOCK*nvars*sizeof(double)>>>(
565  solver->gpu_npoints_boundary[d], nvars, 1, FluxI_p2,
566  solver->StageBoundaryIntegral + (2*d+1)*nvars
567  );
568  cudaDeviceSynchronize();
569 
570 #if defined(GPU_STAT)
571  checkCuda( cudaEventRecord(stop, 0) );
572  checkCuda( cudaEventSynchronize(stop) );
573  checkCuda( cudaEventElapsedTime(&milliseconds, start, stop) );
574  printf("%-50s GPU time = %.6f dir = %d bandwidth (GB/s) = %.2f\n",
575  "StageBoundaryIntegral", milliseconds*1e-3, d, (1e-6*2*solver->gpu_npoints_boundary[d]*nvars*sizeof(double))/milliseconds);
576 #endif
577  offset += dim[d] + 2*ghosts;
578  }
579 
580  if (solver->flag_ib) gpuArrayBlockMultiply(hyp, solver->gpu_iblank, size, nvars);
581 
582 #if defined(GPU_STAT)
583  checkCuda(cudaEventDestroy(start));
584  checkCuda(cudaEventDestroy(stop));
585 #endif
586 
587  return(0);
588 }
int InitializeSolvers ( void *  s,
int  nsims 
)

This function initializes all solver-specific function pointers depending on user input. The specific functions used for spatial discretization, time integration, and solution output are set here.

Parameters
s - Array of simulation objects of type SimulationObject
nsims - Number of simulation objects

Definition at line 52 of file InitializeSolvers.c.

55 {
57  int ns;
59 
60  if (nsims == 0) return 0;
61 
62  if (!sim[0].mpi.rank) {
63  printf("Initializing solvers.\n");
64  }
65 
66  for (ns = 0; ns < nsims; ns++) {
67 
68  HyPar *solver = &(sim[ns].solver);
69  MPIVariables *mpi = &(sim[ns].mpi);
70 
74 #if defined(HAVE_CUDA)
75  if (solver->use_gpu) {
77  } else {
78 #endif
80 #if defined(HAVE_CUDA)
81  }
82 #endif
87 
88  /* choose the type of parabolic discretization */
89  solver->ParabolicFunction = NULL;
90  solver->SecondDerivativePar = NULL;
91  solver->FirstDerivativePar = NULL;
92  solver->InterpolateInterfacesPar = NULL;
93 
94 #if defined(HAVE_CUDA)
95  if (solver->use_gpu) {
96 
97  if (!strcmp(solver->spatial_type_par,_NC_2STAGE_)) {
98 
100 
101  if (!strcmp(solver->spatial_scheme_par,_FOURTH_ORDER_CENTRAL_)) {
103  } else {
104  fprintf(stderr,"ERROR (domain %d): scheme %s is not supported on GPU!",
105  ns, solver->spatial_scheme_par);
106  return 1;
107  }
108 
109  }
110 
111  } else {
112 #endif
113 
114  if (!strcmp(solver->spatial_type_par,_NC_1STAGE_)) {
115 
117  if (!strcmp(solver->spatial_scheme_par,_SECOND_ORDER_CENTRAL_)) {
119  } else if (!strcmp(solver->spatial_scheme_par,_FOURTH_ORDER_CENTRAL_)) {
121  } else {
122  fprintf(stderr,"Error (domain %d): %s is not a supported ",
123  ns, solver->spatial_scheme_par);
124  fprintf(stderr,"spatial scheme of type %s for the parabolic terms.\n",
125  solver->spatial_type_par);
126  }
127 
128  } else if (!strcmp(solver->spatial_type_par,_NC_2STAGE_)) {
129 
131  if (!strcmp(solver->spatial_scheme_par,_SECOND_ORDER_CENTRAL_)) {
133  /* why first order? see ParabolicFunctionNC2Stage.c. 2nd order central
134  approximation to the 2nd derivative can be expressed as a conjugation
135  of 1st order approximations to the 1st derivative (one forward and
136  one backward) -- this prevents odd-even decoupling */
137  } else if (!strcmp(solver->spatial_scheme_par,_FOURTH_ORDER_CENTRAL_)) {
139  /* why 4th order? I could not derive the decomposition of the
140  4th order central approximation to the 2nd derivative! Some problems
141  may show odd-even decoupling */
142  } else {
143  fprintf(stderr,"Error (domain %d): %s is not a supported ",
144  ns, solver->spatial_scheme_par);
145  fprintf(stderr,"spatial scheme of type %s for the parabolic terms.\n",
146  solver->spatial_type_par);
147  }
148 
149  } else if (!strcmp(solver->spatial_type_par,_NC_1_5STAGE_)) {
150 
152  if (!strcmp(solver->spatial_scheme_par,_SECOND_ORDER_CENTRAL_)) {
155  } else if (!strcmp(solver->spatial_scheme_par,_FOURTH_ORDER_CENTRAL_)) {
158  } else {
159  fprintf(stderr,"Error (domain %d): %s is not a supported ",
160  ns, solver->spatial_scheme_par);
161  fprintf(stderr,"spatial scheme of type %s for the parabolic terms.\n",
162  solver->spatial_type_par);
163  }
164 
165  } else if (!strcmp(solver->spatial_type_par,_CONS_1STAGE_)) {
166 
168  if (!strcmp(solver->spatial_scheme_par,_SECOND_ORDER_CENTRAL_)) {
170  } else {
171  fprintf(stderr,"Error (domain %d): %s is not a supported ",
172  ns, solver->spatial_scheme_par);
173  fprintf(stderr,"spatial scheme of type %s for the parabolic terms.\n",
174  solver->spatial_type_par);
175  }
176 
177  } else {
178 
179  fprintf(stderr,"Error (domain %d): %s is not a supported ",
180  ns, solver->spatial_type_par);
181  fprintf(stderr,"spatial discretization type for the parabolic terms.\n");
182  return(1);
183 
184  }
185 
186 #if defined(HAVE_CUDA)
187  }
188 #endif
189 
190  /* Spatial interpolation for hyperbolic term */
191  solver->interp = NULL;
192  solver->compact = NULL;
193  solver->lusolver = NULL;
194  solver->SetInterpLimiterVar = NULL;
195  solver->flag_nonlinearinterp = 1;
196  if (strcmp(solver->interp_type,_CHARACTERISTIC_) && strcmp(solver->interp_type,_COMPONENTS_)) {
197  fprintf(stderr,"Error in InitializeSolvers() (domain %d): %s is not a ",
198  ns, solver->interp_type);
199  fprintf(stderr,"supported interpolation type.\n");
200  return(1);
201  }
202 
203 #if defined(HAVE_CUDA)
204  if (solver->use_gpu) {
205 
206  if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_WENO_)) {
207 
208  /* Fifth order WENO scheme */
209  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
210  fprintf(stderr,
211  "Error (domain %d): characteristic-based WENO5 is not yet implemented on GPUs.\n",
212  ns );
213  return 1;
214  } else {
216  }
217  solver->interp = (WENOParameters*) calloc(1,sizeof(WENOParameters));
218  IERR WENOInitialize(solver,mpi,solver->spatial_scheme_hyp,solver->interp_type); CHECKERR(ierr);
219  solver->flag_nonlinearinterp = !(((WENOParameters*)solver->interp)->no_limiting);
220 
221  } else {
222 
223  fprintf(stderr,
224  "Error (domain %d): %s is a not a supported spatial interpolation scheme on GPUs.\n",
225  ns, solver->spatial_scheme_hyp);
226  return 1;
227  }
228 
229  } else {
230 #endif
231 
232  if (!strcmp(solver->spatial_scheme_hyp,_FIRST_ORDER_UPWIND_)) {
233 
234  /* First order upwind scheme */
235  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
237  } else {
239  }
240 
241  } else if (!strcmp(solver->spatial_scheme_hyp,_SECOND_ORDER_CENTRAL_)) {
242 
243  /* Second order central scheme */
244  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
246  } else {
248  }
249 
250  } else if (!strcmp(solver->spatial_scheme_hyp,_SECOND_ORDER_MUSCL_)) {
251 
252  /* Second order MUSCL scheme */
253  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
255  } else {
257  }
258  solver->interp = (MUSCLParameters*) calloc(1,sizeof(MUSCLParameters));
259  IERR MUSCLInitialize(solver,mpi); CHECKERR(ierr);
260 
261  } else if (!strcmp(solver->spatial_scheme_hyp,_THIRD_ORDER_MUSCL_)) {
262 
263  /* Third order MUSCL scheme */
264  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
266  } else {
268  }
269  solver->interp = (MUSCLParameters*) calloc(1,sizeof(MUSCLParameters));
270  IERR MUSCLInitialize(solver,mpi); CHECKERR(ierr);
271 
272  } else if (!strcmp(solver->spatial_scheme_hyp,_FOURTH_ORDER_CENTRAL_)) {
273 
274  /* Fourth order central scheme */
275  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
277  } else {
279  }
280 
281  } else if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_UPWIND_)) {
282 
283  /* Fifth order upwind scheme */
284  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
286  } else {
288  }
289 
290  } else if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_COMPACT_UPWIND_)) {
291 
292  /* Fifth order compact upwind scheme */
293  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
295  } else {
297  }
298  solver->compact = (CompactScheme*) calloc(1,sizeof(CompactScheme));
299  IERR CompactSchemeInitialize(solver,mpi,solver->interp_type);
300  solver->lusolver = (TridiagLU*) calloc (1,sizeof(TridiagLU));
301  IERR tridiagLUInit(solver->lusolver,&mpi->world);CHECKERR(ierr);
302 
303  } else if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_WENO_)) {
304 
305  /* Fifth order WENO scheme */
306  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
307  solver->InterpolateInterfacesHyp = Interp1PrimFifthOrderWENOChar;
308  } else {
309  solver->InterpolateInterfacesHyp = Interp1PrimFifthOrderWENO;
310  }
311  solver->interp = (WENOParameters*) calloc(1,sizeof(WENOParameters));
312  IERR WENOInitialize(solver,mpi,solver->spatial_scheme_hyp,solver->interp_type); CHECKERR(ierr);
313  solver->flag_nonlinearinterp = !(((WENOParameters*)solver->interp)->no_limiting);
314 
315  } else if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_CRWENO_)) {
316 
317  /* Fifth order CRWENO scheme */
318  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
319  solver->InterpolateInterfacesHyp = Interp1PrimFifthOrderCRWENOChar;
320  } else {
321  solver->InterpolateInterfacesHyp = Interp1PrimFifthOrderCRWENO;
322  }
323  solver->interp = (WENOParameters*) calloc(1,sizeof(WENOParameters));
324  IERR WENOInitialize(solver,mpi,solver->spatial_scheme_hyp,solver->interp_type); CHECKERR(ierr);
325  solver->flag_nonlinearinterp = !(((WENOParameters*)solver->interp)->no_limiting);
326  solver->compact = (CompactScheme*) calloc(1,sizeof(CompactScheme));
327  IERR CompactSchemeInitialize(solver,mpi,solver->interp_type);
328  solver->lusolver = (TridiagLU*) calloc (1,sizeof(TridiagLU));
329  IERR tridiagLUInit(solver->lusolver,&mpi->world);CHECKERR(ierr);
330 
331  } else if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_HCWENO_)) {
332 
333  /* Fifth order HCWENO scheme */
334  if ((solver->nvars > 1) && (!strcmp(solver->interp_type,_CHARACTERISTIC_))) {
335  solver->InterpolateInterfacesHyp = Interp1PrimFifthOrderHCWENOChar;
336  } else {
337  solver->InterpolateInterfacesHyp = Interp1PrimFifthOrderHCWENO;
338  }
339  solver->interp = (WENOParameters*) calloc(1,sizeof(WENOParameters));
340  IERR WENOInitialize(solver,mpi,solver->spatial_scheme_hyp,solver->interp_type); CHECKERR(ierr);
341  solver->flag_nonlinearinterp = !(((WENOParameters*)solver->interp)->no_limiting);
342  solver->compact = (CompactScheme*) calloc(1,sizeof(CompactScheme));
343  IERR CompactSchemeInitialize(solver,mpi,solver->interp_type);
344  solver->lusolver = (TridiagLU*) calloc (1,sizeof(TridiagLU));
345  IERR tridiagLUInit(solver->lusolver,&mpi->world);CHECKERR(ierr);
346 
347  } else {
348 
349  fprintf(stderr,"Error (domain %d): %s is not a supported spatial interpolation scheme.\n",
350  ns, solver->spatial_scheme_hyp);
351  return(1);
352  }
353 
354 #if defined(HAVE_CUDA)
355  }
356 #endif
357 
358  /* Time integration */
359  solver->time_integrator = NULL;
360 #ifdef with_petsc
361  if (solver->use_petscTS) {
362  /* dummy -- not used */
363  solver->TimeIntegrate = NULL;
364  solver->msti = NULL;
365  } else {
366  if (!strcmp(solver->time_scheme,_FORWARD_EULER_)) {
367  solver->TimeIntegrate = TimeForwardEuler;
368  solver->msti = NULL;
369  } else if (!strcmp(solver->time_scheme,_RK_)) {
370  solver->TimeIntegrate = TimeRK;
371  solver->msti = (ExplicitRKParameters*) calloc (1,sizeof(ExplicitRKParameters));
372  IERR TimeExplicitRKInitialize(solver->time_scheme,solver->time_scheme_type,
373  solver->msti,mpi); CHECKERR(ierr);
374  } else if (!strcmp(solver->time_scheme,_GLM_GEE_)) {
375  solver->TimeIntegrate = TimeGLMGEE;
376  solver->msti = (GLMGEEParameters*) calloc (1,sizeof(GLMGEEParameters));
377  IERR TimeGLMGEEInitialize(solver->time_scheme,solver->time_scheme_type,
378  solver->msti,mpi); CHECKERR(ierr);
379  } else {
380  fprintf(stderr,"Error (domain %d): %s is not a supported time-integration scheme.\n",
381  ns, solver->time_scheme);
382  return(1);
383  }
384  }
385 #else
386  if (!strcmp(solver->time_scheme,_FORWARD_EULER_)) {
387  solver->TimeIntegrate = TimeForwardEuler;
388  solver->msti = NULL;
389  } else if (!strcmp(solver->time_scheme,_RK_)) {
390  solver->TimeIntegrate = TimeRK;
391  solver->msti = (ExplicitRKParameters*) calloc (1,sizeof(ExplicitRKParameters));
392  IERR TimeExplicitRKInitialize(solver->time_scheme,solver->time_scheme_type,
393  solver->msti,mpi); CHECKERR(ierr);
394  } else if (!strcmp(solver->time_scheme,_GLM_GEE_)) {
395  solver->TimeIntegrate = TimeGLMGEE;
396  solver->msti = (GLMGEEParameters*) calloc (1,sizeof(GLMGEEParameters));
397  IERR TimeGLMGEEInitialize(solver->time_scheme,solver->time_scheme_type,
398  solver->msti,mpi); CHECKERR(ierr);
399  } else {
400  fprintf(stderr,"Error (domain %d): %s is not a supported time-integration scheme.\n",
401  ns, solver->time_scheme);
402  return(1);
403  }
404 #endif
405 
406  /* Solution output function */
407  solver->WriteOutput = NULL; /* default - no output */
408  solver->filename_index = NULL;
409  strcpy(solver->op_fname_root, "op");
410 #ifdef with_librom
411  strcpy(solver->op_rom_fname_root, "op_rom");
412 #endif
413  strcpy(solver->aux_op_fname_root, "ts0");
414  if (!strcmp(solver->output_mode,"serial")) {
415  solver->index_length = 5;
416  solver->filename_index = (char*) calloc (solver->index_length+1,sizeof(char));
417  int i; for (i=0; i<solver->index_length; i++) solver->filename_index[i] = '0';
418  solver->filename_index[solver->index_length] = (char) 0;
419  if (!strcmp(solver->op_file_format,"text")) {
420  solver->WriteOutput = WriteText;
421  strcpy(solver->solnfilename_extn,".dat");
422  } else if (!strcmp(solver->op_file_format,"tecplot2d")) {
423  solver->WriteOutput = WriteTecplot2D;
424  strcpy(solver->solnfilename_extn,".dat");
425  } else if (!strcmp(solver->op_file_format,"tecplot3d")) {
426  solver->WriteOutput = WriteTecplot3D;
427  strcpy(solver->solnfilename_extn,".dat");
428  } else if ((!strcmp(solver->op_file_format,"binary")) || (!strcmp(solver->op_file_format,"bin"))) {
429  solver->WriteOutput = WriteBinary;
430  strcpy(solver->solnfilename_extn,".bin");
431  } else if (!strcmp(solver->op_file_format,"none")) {
432  solver->WriteOutput = NULL;
433  } else {
434  fprintf(stderr,"Error (domain %d): %s is not a supported file format.\n",
435  ns, solver->op_file_format);
436  return(1);
437  }
438  if ((!strcmp(solver->op_overwrite,"no")) && solver->restart_iter) {
439  /* if it's a restart run, fast-forward the filename */
440  int t;
441  for (t=0; t<solver->restart_iter; t++)
442  if ((t+1)%solver->file_op_iter == 0) IncrementFilenameIndex(solver->filename_index,solver->index_length);
443  }
444  } else if (!strcmp(solver->output_mode,"parallel")) {
445  if (!strcmp(solver->op_file_format,"none")) solver->WriteOutput = NULL;
446  else {
447  /* only binary file writing supported in parallel mode */
448  /* use post-processing scripts to convert */
449  solver->WriteOutput = WriteBinary;
450  strcpy(solver->solnfilename_extn,".bin");
451  }
452  } else {
453  fprintf(stderr,"Error (domain %d): %s is not a supported output mode.\n",
454  ns, solver->output_mode);
455  fprintf(stderr,"Should be \"serial\" or \"parallel\". \n");
456  return(1);
457  }
458 
459  /* Solution plotting function */
460  strcpy(solver->plotfilename_extn,".png");
461 #ifdef with_python
462  solver->py_plt_func = NULL;
463  solver->py_plt_func_args = NULL;
464  {
465  char python_plotting_fname[_MAX_STRING_SIZE_] = "plotSolution";
466  PyObject* py_plot_name = PyUnicode_DecodeFSDefault(python_plotting_fname);
467  PyObject* py_plot_module = PyImport_Import(py_plot_name);
468  Py_DECREF(py_plot_name);
469  if (py_plot_module) {
470  solver->py_plt_func = PyObject_GetAttrString(py_plot_module, "plotSolution");
471  if (!solver->py_plt_func) {
472  if (!mpi->rank) {
473  printf("Unable to load plotSolution function from Python module.\n");
474  }
475  } else {
476  if (!mpi->rank) {
477  printf("Loaded Python module for plotting.\n");
478  printf("Loaded plotSolution function from Python module.\n");
479  }
480  }
481  } else {
482  if (!mpi->rank) {
483  printf("Unable to load Python module for plotting.\n");
484  }
485  }
486  }
487 #endif
488 
489  }
490 
491  return(0);
492 }
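
Note on the pattern used above: InitializeSolvers() performs string-keyed dispatch. Each scheme or integrator keyword read from the input is compared with strcmp() against the supported names, and the matching routine is stored in a function pointer on the solver object (InterpolateInterfacesHyp, TimeIntegrate, WriteOutput, and so on); downstream code then calls through these pointers without re-checking the strings. The stand-alone sketch below illustrates only that idea; MiniSolver, select_reconstruction, recon_first_order, and recon_weno5 are hypothetical names, not HyPar APIs, and the scheme strings are purely illustrative.

#include <stdio.h>
#include <string.h>

/* Hypothetical reduced solver object: a scheme keyword read from the input
   and the function pointer selected for it, mirroring how InitializeSolvers()
   fills members such as InterpolateInterfacesHyp and TimeIntegrate. */
typedef struct {
  char scheme[64];
  int (*reconstruct)(const double*, double*, int); /* interface reconstruction */
} MiniSolver;

/* Two stand-in reconstruction routines with the same signature. */
static int recon_first_order(const double *u, double *uface, int n)
{
  int i;
  for (i = 0; i < n; i++) uface[i] = u[i]; /* copy the cell value to the face */
  return 0;
}

static int recon_weno5(const double *u, double *uface, int n)
{
  int i;
  for (i = 0; i < n; i++) uface[i] = u[i]; /* placeholder for a 5th-order stencil */
  return 0;
}

/* String-keyed dispatch: compare the keyword once, store the pointer. */
static int select_reconstruction(MiniSolver *s)
{
  if      (!strcmp(s->scheme, "1"    )) s->reconstruct = recon_first_order;
  else if (!strcmp(s->scheme, "weno5")) s->reconstruct = recon_weno5;
  else { fprintf(stderr, "unsupported scheme %s\n", s->scheme); return 1; }
  return 0;
}

int main(void)
{
  MiniSolver s;
  double u[4] = {1.0, 2.0, 3.0, 4.0}, uface[4];

  strcpy(s.scheme, "weno5");
  if (select_reconstruction(&s)) return 1;

  /* Downstream code calls through the pointer and never re-checks the string. */
  s.reconstruct(u, uface, 4);
  printf("uface[2] = %g\n", uface[2]);
  return 0;
}

Adding a new scheme then only touches the selection block in InitializeSolvers(); the hyperbolic, parabolic, time-integration, and output drivers are unaffected because they call through the stored pointers.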