/*! @file Interp1PrimFifthOrderCompactUpwindChar.c
    @brief 5th order compact upwind reconstruction (characteristic-based) on a
           uniform grid. Part of HyPar, a finite-difference hyperbolic-parabolic
           PDE solver on Cartesian grids.
*/

#include <stdio.h>
#include <basic.h>
#include <arrayfunctions.h>
#include <matmult_native.h>
#include <mathfunctions.h>
#include <interpolation.h>
#include <tridiagLU.h>
#include <mpivars.h>
#include <hypar.h>

#ifdef with_omp
#include <omp.h>
#endif

#undef _MINIMUM_GHOSTS_
/*! \def _MINIMUM_GHOSTS_
    Minimum number of ghost points required by this interpolation method.
*/
#define _MINIMUM_GHOSTS_ 3
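/* Three ghost points are required because the stencils below reach up to three
   cells beyond an interface on the upwind side (cell index indexI[dir]-3 for
   the left-biased case). */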

/*! @brief 5th order compact upwind reconstruction (characteristic-based) on a
    uniform grid.

    Computes the interpolated value of the first primitive of a function
    \f$f\f$ at the cell interfaces by applying the fifth-order compact upwind
    scheme to the characteristic variables. */
int Interp1PrimFifthOrderCompactUpwindChar(
                                            double *fI,  /*!< Array of interpolated function values at the interfaces (output) */
                                            double *fC,  /*!< Array of cell-centered values of the function to interpolate */
                                            double *u,   /*!< Array of cell-centered values of the solution (used for the averaged state) */
                                            double *x,   /*!< Grid coordinates (unused here; the grid is assumed uniform) */
                                            int    upw,  /*!< Upwind direction (positive: left-biased stencil; negative: right-biased) */
                                            int    dir,  /*!< Spatial dimension along which to interpolate */
                                            void   *s,   /*!< Solver object of type #HyPar */
                                            void   *m,   /*!< MPI object of type #MPIVariables */
                                            int    uflag /*!< Flag indicating whether the solution itself is being reconstructed */
                                          )
{
  HyPar         *solver  = (HyPar*)         s;
  MPIVariables  *mpi     = (MPIVariables*)  m;
  CompactScheme *compact = (CompactScheme*) solver->compact;
  TridiagLU     *lu      = (TridiagLU*)     solver->lusolver;
  int           sys, Nsys, d, v, k;
  _DECLARE_IERR_;

  int ghosts = solver->ghosts;
  int ndims  = solver->ndims;
  int nvars  = solver->nvars;
  int *dim   = solver->dim_local;

  /* define some constants */
  static const double three_by_ten         = 3.0/10.0,
                      six_by_ten           = 6.0/10.0,
                      one_by_ten           = 1.0/10.0,
                      one_by_thirty        = 1.0/30.0,
                      nineteen_by_thirty   = 19.0/30.0,
                      one_third            = 1.0/3.0,
                      thirteen_by_sixty    = 13.0/60.0,
                      fortyseven_by_sixty  = 47.0/60.0,
                      twentyseven_by_sixty = 27.0/60.0,
                      one_by_twenty        = 1.0/20.0;
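  /* These coefficients implement the following formulas (written here for the
     left-biased upw > 0 case; the right-biased case is the mirror image),
     applied to each characteristic field:

       interior interfaces (5th-order compact upwind):
         (3/10) fI[j-1/2] + (6/10) fI[j+1/2] + (1/10) fI[j+3/2]
             = (1/30) fC[j-1] + (19/30) fC[j] + (1/3) fC[j+1]

       physical boundary interfaces (5th-order non-compact upwind):
         fI[j+1/2] = (1/30) fC[j-2] - (13/60) fC[j-1] + (47/60) fC[j]
                     + (27/60) fC[j+1] - (1/20) fC[j+2]                       */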

  /* create index and bounds for the outer loop, i.e., to loop over all 1D lines
     along dimension "dir" */
  int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims];
  _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] =  1;
  _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;

  /* calculate total number of block tridiagonal systems to solve */
  _ArrayProduct1D_(bounds_outer,ndims,Nsys);

  /* allocate arrays for the averaged state, eigenvectors and characteristic interpolated f */
  double R[nvars*nvars], L[nvars*nvars], uavg[nvars];
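  /* L and R are the left and right eigenvector matrices of the flux Jacobian
     along "dir", evaluated at the averaged state uavg of the two cells
     adjacent to each interface. */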

  /* get the (preallocated) work arrays for the block tridiagonal system */
  double *A = compact->A;
  double *B = compact->B;
  double *C = compact->C;
  double *F = compact->R;
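  /* A, B, and C hold the sub-, main-, and super-diagonal (nvars x nvars)
     blocks of the systems, and F holds the right-hand sides; the Nsys
     independent systems (one per 1D line) are stored interleaved, hence the
     (Nsys*indexI[dir]+sys) indexing below. */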

#pragma omp parallel for schedule(auto) default(shared) private(sys,d,v,k,R,L,uavg,index_outer,indexC,indexI)
  for (sys=0; sys<Nsys; sys++) {
    _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
    _ArrayCopy1D_(index_outer,indexC,ndims);
    _ArrayCopy1D_(index_outer,indexI,ndims);
    for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
      int qm1,qm2,qm3,qp1,qp2;
      if (upw > 0) {
        /* left-biased stencil */
        indexC[dir] = indexI[dir]-3; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3);
        indexC[dir] = indexI[dir]-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
        indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        indexC[dir] = indexI[dir]  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
        indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
      } else {
        /* right-biased stencil (mirror image) */
        indexC[dir] = indexI[dir]+2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3);
        indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
        indexC[dir] = indexI[dir]  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
        indexC[dir] = indexI[dir]-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
      }

      int p; /* 1D index of the interface */
      _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);

      /* find averaged state at this interface */
      IERR solver->AveragingFunction(uavg,&u[nvars*qm1],&u[nvars*qp1],solver->physics); CHECKERR(ierr);

      /* get the left and right eigenvectors */
      IERR solver->GetLeftEigenvectors (uavg,L,solver->physics,dir); CHECKERR(ierr);
      IERR solver->GetRightEigenvectors(uavg,R,solver->physics,dir); CHECKERR(ierr);

      for (v=0; v<nvars; v++) {

        /* calculate the characteristic flux components along this characteristic */
        double fm3, fm2, fm1, fp1, fp2;
        fm3 = fm2 = fm1 = fp1 = fp2 = 0;
        for (k = 0; k < nvars; k++) {
          fm3 += L[v*nvars+k] * fC[qm3*nvars+k];
          fm2 += L[v*nvars+k] * fC[qm2*nvars+k];
          fm1 += L[v*nvars+k] * fC[qm1*nvars+k];
          fp1 += L[v*nvars+k] * fC[qp1*nvars+k];
          fp2 += L[v*nvars+k] * fC[qp2*nvars+k];
        }

        if (   ((mpi->ip[dir] == 0                ) && (indexI[dir] == 0       ))
            || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
          /* use 5th order (non-compact) upwind at the physical boundaries */
          for (k=0; k<nvars; k++) {
            A[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = 0.0;
            C[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = 0.0;
            B[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = L[v*nvars+k];
          }
          F[(Nsys*indexI[dir]+sys)*nvars+v] =   one_by_thirty        * fm3
                                              - thirteen_by_sixty    * fm2
                                              + fortyseven_by_sixty  * fm1
                                              + twentyseven_by_sixty * fp1
                                              - one_by_twenty        * fp2;
        } else {
          /* 5th order compact upwind at the interior points */
          if (upw > 0) {
            for (k=0; k<nvars; k++) {
              A[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = three_by_ten * L[v*nvars+k];
              B[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = six_by_ten   * L[v*nvars+k];
              C[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = one_by_ten   * L[v*nvars+k];
            }
          } else {
            for (k=0; k<nvars; k++) {
              C[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = three_by_ten * L[v*nvars+k];
              B[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = six_by_ten   * L[v*nvars+k];
              A[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = one_by_ten   * L[v*nvars+k];
            }
          }
          F[(Nsys*indexI[dir]+sys)*nvars+v] =   one_by_thirty      * fm2
                                              + nineteen_by_thirty * fm1
                                              + one_third          * fp1;
        }
      }
    }
  }
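  /* Note on the structure of the assembled system: each scalar characteristic
     equation was premultiplied by the corresponding left-eigenvector row L[v],
     so the unknowns of the block tridiagonal solve are the interface values of
     f in physical (component) space. This is why the solution F is copied
     directly into fI below without a back-transform through R. */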

#ifdef serial

  /* solve the block tridiagonal system */
  IERR blocktridiagLU(A,B,C,F,dim[dir]+1,Nsys,nvars,lu,NULL); CHECKERR(ierr);

#else

  /* solve the block tridiagonal system */
  /* all processes except the last one solve without the last interface to avoid overlap */
  if (mpi->ip[dir] != mpi->iproc[dir]-1) {
    IERR blocktridiagLU(A,B,C,F,dim[dir]  ,Nsys,nvars,lu,&mpi->comm[dir]); CHECKERR(ierr);
  } else {
    IERR blocktridiagLU(A,B,C,F,dim[dir]+1,Nsys,nvars,lu,&mpi->comm[dir]); CHECKERR(ierr);
  }

  /* now get the solution to the last interface from the next process */
  double *sendbuf = compact->sendbuf;
  double *recvbuf = compact->recvbuf;
  MPI_Request req[2] = {MPI_REQUEST_NULL,MPI_REQUEST_NULL};
  if (mpi->ip[dir])                      for (d=0; d<Nsys*nvars; d++) sendbuf[d] = F[d];
  if (mpi->ip[dir] != mpi->iproc[dir]-1) MPI_Irecv(recvbuf,Nsys*nvars,MPI_DOUBLE,mpi->ip[dir]+1,214,mpi->comm[dir],&req[0]);
  if (mpi->ip[dir])                      MPI_Isend(sendbuf,Nsys*nvars,MPI_DOUBLE,mpi->ip[dir]-1,214,mpi->comm[dir],&req[1]);
  MPI_Status status_arr[2];
  MPI_Waitall(2,&req[0],status_arr);
  if (mpi->ip[dir] != mpi->iproc[dir]-1) for (d=0; d<Nsys*nvars; d++) F[d+Nsys*nvars*dim[dir]] = recvbuf[d];

#endif
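  /* In the parallel case, the last interface of a subdomain coincides with the
     first interface of the next subdomain along "dir"; each process (except
     the last) therefore solves for dim[dir] interfaces only and receives the
     shared value from its neighbor, so every interface is computed exactly
     once. */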

  /* save the solution to fI */
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,v,k,R,L,uavg,index_outer,indexC,indexI)
  for (sys=0; sys<Nsys; sys++) {
    _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
    _ArrayCopy1D_(index_outer,indexI,ndims);
    for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
      int p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
      _ArrayCopy1D_((F+sys*nvars+Nsys*nvars*indexI[dir]),(fI+nvars*p),nvars);
    }
  }

  return(0);
}
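/* A minimal sketch of how this routine is typically wired up (assumed from
   HyPar's conventions, not part of this file): the solver object stores a
   pointer to the selected interface-interpolation routine, e.g.

     solver->InterpolateInterfacesHyp = Interp1PrimFifthOrderCompactUpwindChar;

   (the member name InterpolateInterfacesHyp is an assumption here), and the
   hyperbolic-term computation calls it once per spatial dimension with the
   appropriate upwind direction. */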