HyPar  1.0
Finite-Difference Hyperbolic-Parabolic PDE Solver on Cartesian Grids
Interp1PrimFifthOrderHCWENOChar.c

#include <stdio.h>
#include <basic.h>
#include <arrayfunctions.h>
#include <matmult_native.h>
#include <mathfunctions.h>
#include <interpolation.h>
#include <tridiagLU.h>
#include <mpivars.h>
#include <hypar.h>

#ifdef with_omp
#include <omp.h>
#endif

#undef  _MINIMUM_GHOSTS_

/*! \def _MINIMUM_GHOSTS_
    Minimum number of ghost points required by this interpolation method.
*/
#define _MINIMUM_GHOSTS_ 3

/*! \brief 5th order hybrid compact-WENO reconstruction (characteristic-based) on a uniform grid */
int Interp1PrimFifthOrderHCWENOChar(
                                      double  *fI,    /*!< Array of interpolated function values at the interfaces */
                                      double  *fC,    /*!< Array of cell-centered values of the function f(u) */
                                      double  *u,     /*!< Array of cell-centered values of the solution u */
                                      double  *x,     /*!< Grid coordinates */
                                      int     upw,    /*!< Upwind direction (left- or right-biased) */
                                      int     dir,    /*!< Spatial dimension along which to interpolate */
                                      void    *s,     /*!< Solver object of type HyPar */
                                      void    *m,     /*!< MPI object of type MPIVariables */
                                      int     uflag   /*!< Flag indicating whether u itself is being reconstructed (f(u) = u) */
                                    )
{
  HyPar           *solver = (HyPar*)          s;
  MPIVariables    *mpi    = (MPIVariables*)   m;
  CompactScheme   *compact= (CompactScheme*)  solver->compact;
  WENOParameters  *weno   = (WENOParameters*) solver->interp;
  TridiagLU       *lu     = (TridiagLU*)      solver->lusolver;
  int             sys,Nsys,d,v,k;
  _DECLARE_IERR_;

  int ghosts = solver->ghosts;
  int ndims  = solver->ndims;
  int nvars  = solver->nvars;
  int *dim   = solver->dim_local;

  /* define some constants */
  static const double one_half  = 1.0/2.0;
  static const double one_third = 1.0/3.0;
  static const double one_sixth = 1.0/6.0;
108 
109  double *ww1, *ww2, *ww3;
110  ww1 = weno->w1 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
111  ww2 = weno->w2 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
112  ww3 = weno->w3 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
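  /* The nonlinear WENO weights are precomputed and stored in weno->w1/w2/w3;
     the pointer arithmetic above selects the block corresponding to the
     upwind direction (upw), to whether u or f(u) is being reconstructed
     (uflag), and to the spatial dimension (dir). */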

  /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along
     dimension "dir" */
  int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims];
  _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] =  1;
  _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;
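  /* bounds_outer collapses dimension "dir" (one entry per 1D line), while
     bounds_inter has one extra point along "dir", since a line of N cells
     has N+1 interfaces. */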

  /* calculate total number of block tridiagonal systems to solve */
  _ArrayProduct1D_(bounds_outer,ndims,Nsys);

  /* allocate arrays for the averaged state, eigenvectors and characteristic interpolated f */
  double R[nvars*nvars], L[nvars*nvars], uavg[nvars];

  /* Allocate arrays for tridiagonal system */
  double *A = compact->A;
  double *B = compact->B;
  double *C = compact->C;
  double *F = compact->R;

#pragma omp parallel for schedule(auto) default(shared) private(sys,d,v,k,R,L,uavg,index_outer,indexC,indexI)
  for (sys=0; sys<Nsys; sys++) {
    _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
    _ArrayCopy1D_(index_outer,indexC,ndims);
    _ArrayCopy1D_(index_outer,indexI,ndims);
    for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
      int qm1,qm2,qm3,qp1,qp2;
      if (upw > 0) {
        indexC[dir] = indexI[dir]-3; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3);
        indexC[dir] = indexI[dir]-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
        indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        indexC[dir] = indexI[dir]  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
        indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
      } else {
        indexC[dir] = indexI[dir]+2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3);
        indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
        indexC[dir] = indexI[dir]  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
        indexC[dir] = indexI[dir]-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
      }
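      /* qm3, qm2, qm1, qp1, qp2 are the 1D array indices of the five stencil
         cells around interface indexI[dir]: cells j-3,...,j+1 for a left-biased
         stencil (upw > 0), mirrored for a right-biased one (upw < 0). */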

      int p; /* 1D index of the interface */
      _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);

      /* find averaged state at this interface */
      IERR solver->AveragingFunction(uavg,&u[nvars*qm1],&u[nvars*qp1],solver->physics); CHECKERR(ierr);

      /* Get the left and right eigenvectors */
      IERR solver->GetLeftEigenvectors (uavg,L,solver->physics,dir); CHECKERR(ierr);
      IERR solver->GetRightEigenvectors(uavg,R,solver->physics,dir); CHECKERR(ierr);
      for (v=0; v<nvars; v++) {

        /* calculate the characteristic flux components along this characteristic */
        double fm3, fm2, fm1, fp1, fp2;
        fm3 = fm2 = fm1 = fp1 = fp2 = 0;
        for (k = 0; k < nvars; k++) {
          fm3 += L[v*nvars+k] * fC[qm3*nvars+k];
          fm2 += L[v*nvars+k] * fC[qm2*nvars+k];
          fm1 += L[v*nvars+k] * fC[qm1*nvars+k];
          fp1 += L[v*nvars+k] * fC[qp1*nvars+k];
          fp2 += L[v*nvars+k] * fC[qp2*nvars+k];
        }

        /* the three candidate stencils */
        double f1, f2, f3;
        if (   ((mpi->ip[dir] == 0                ) && (indexI[dir] == 0       ))
            || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
          /* Use WENO5 at the physical boundaries */
          f1 = (2*one_sixth)*fm3 - (7.0*one_sixth)*fm2 + (11.0*one_sixth)*fm1;
          f2 = (-one_sixth)*fm2 + (5.0*one_sixth)*fm1 + (2*one_sixth)*fp1;
          f3 = (2*one_sixth)*fm1 + (5*one_sixth)*fp1 - (one_sixth)*fp2;
        } else {
          /* HCWENO5 at the interior points */
          f1 = (one_sixth) * (fm2 + 5*fm1);
          f2 = (one_sixth) * (5*fm1 + fp1);
          f3 = (one_sixth) * (fm1 + 5*fp1);
        }
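        /* At the physical boundaries the candidates are the three third-order
           stencils of the explicit WENO5 scheme; in the interior they are the
           candidates of the compact scheme, whose implicit coupling enters
           through the tridiagonal blocks assembled below. */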

        /* retrieve the precomputed WENO weights */
        double w1,w2,w3;
        w1 = *(ww1+p*nvars+v);
        w2 = *(ww2+p*nvars+v);
        w3 = *(ww3+p*nvars+v);

        /* calculate the hybridization parameter */
        double sigma;
        if (   ((mpi->ip[dir] == 0                ) && (indexI[dir] == 0       ))
            || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
          /* Standard WENO at physical boundaries */
          sigma = 0.0;
        } else {
          double cuckoo, df_jm12, df_jp12, df_jp32, r_j, r_jp1, r_int;
          cuckoo  = (0.9*weno->rc / (1.0-0.9*weno->rc)) * weno->xi * weno->xi;
          df_jm12 = fm1 - fm2;
          df_jp12 = fp1 - fm1;
          df_jp32 = fp2 - fp1;
          r_j   = (absolute(2*df_jp12*df_jm12)+cuckoo)/(df_jp12*df_jp12+df_jm12*df_jm12+cuckoo);
          r_jp1 = (absolute(2*df_jp32*df_jp12)+cuckoo)/(df_jp32*df_jp32+df_jp12*df_jp12+cuckoo);
          r_int = min(r_j, r_jp1);
          sigma = min((r_int/weno->rc), 1.0);
        }
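        /* sigma blends the two schemes based on local smoothness: sigma = 1
           selects the compact scheme in smooth regions, while sigma -> 0
           reverts to pure WENO near discontinuities; it scales the off-diagonal
           blocks and the right-hand side below. */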

        if (upw > 0) {
          for (k=0; k<nvars; k++) {
            A[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = (one_half *sigma) * L[v*nvars+k];
            B[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = (1.0)             * L[v*nvars+k];
            C[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = (one_sixth*sigma) * L[v*nvars+k];
          }
        } else {
          for (k=0; k<nvars; k++) {
            C[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = (one_half *sigma) * L[v*nvars+k];
            B[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = (1.0)             * L[v*nvars+k];
            A[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = (one_sixth*sigma) * L[v*nvars+k];
          }
        }
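        /* Each block row is a compact-scheme coefficient (sigma/2, 1, sigma/6)
           times the v-th row of the left eigenvector matrix, so the block
           tridiagonal solve returns the interface flux directly in physical
           space (no multiplication by R is needed afterwards). */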
        double fWENO, fCompact;
        fWENO    = w1*f1 + w2*f2 + w3*f3;
        fCompact = one_sixth * (one_third*fm2 + 19.0*one_third*fm1 + 10.0*one_third*fp1);
        F[(Nsys*indexI[dir]+sys)*nvars+v] = sigma*fCompact + (1.0-sigma)*fWENO;
      }
    }
  }

#ifdef serial

  /* Solve the tridiagonal system */
  IERR blocktridiagLU(A,B,C,F,dim[dir]+1,Nsys,nvars,lu,NULL); CHECKERR(ierr);

#else

  /* Solve the tridiagonal system */
  /* all processes except the last will solve without the last interface to avoid overlap */
  if (mpi->ip[dir] != mpi->iproc[dir]-1) {
    IERR blocktridiagLU(A,B,C,F,dim[dir]  ,Nsys,nvars,lu,&mpi->comm[dir]); CHECKERR(ierr);
  } else {
    IERR blocktridiagLU(A,B,C,F,dim[dir]+1,Nsys,nvars,lu,&mpi->comm[dir]); CHECKERR(ierr);
  }

  /* Now get the solution to the last interface from the next proc */
  double *sendbuf = compact->sendbuf;
  double *recvbuf = compact->recvbuf;
  MPI_Request req[2] = {MPI_REQUEST_NULL,MPI_REQUEST_NULL};
  if (mpi->ip[dir])                      for (d=0; d<Nsys*nvars; d++) sendbuf[d] = F[d];
  if (mpi->ip[dir] != mpi->iproc[dir]-1) MPI_Irecv(recvbuf,Nsys*nvars,MPI_DOUBLE,mpi->ip[dir]+1,214,mpi->comm[dir],&req[0]);
  if (mpi->ip[dir])                      MPI_Isend(sendbuf,Nsys*nvars,MPI_DOUBLE,mpi->ip[dir]-1,214,mpi->comm[dir],&req[1]);
  MPI_Status status_arr[2];
  MPI_Waitall(2,&req[0],status_arr);
  if (mpi->ip[dir] != mpi->iproc[dir]-1) for (d=0; d<Nsys*nvars; d++) F[d+Nsys*nvars*dim[dir]] = recvbuf[d];
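  /* Every rank except the first sends its first-interface solution to the
     previous rank, which stores it as its own last interface; after this
     exchange each rank holds the solution at all dim[dir]+1 interfaces. */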

#endif

  /* save the solution to fI */
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,v,k,R,L,uavg,index_outer,indexC,indexI)
  for (sys=0; sys<Nsys; sys++) {
    _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
    _ArrayCopy1D_(index_outer,indexI,ndims);
    for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
      int p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
      _ArrayCopy1D_((F+sys*nvars+Nsys*nvars*indexI[dir]),(fI+nvars*p),nvars);
    }
  }

  return(0);
}
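
For reference, a minimal sketch of how this routine might be called (hypothetical; HyPar normally invokes it through the solver's interpolation function pointer, and the buffer names here are assumptions for illustration):

  /* Hypothetical usage sketch -- not part of this file. Assumes the solver and
     mpi objects are initialized and fI, fC, u, x are allocated with the
     required ghost points (at least _MINIMUM_GHOSTS_). */
  int d;
  for (d = 0; d < solver->ndims; d++) {
    /* upw = 1: left-biased reconstruction; uflag = 0: reconstructing f(u),
       not the solution u itself */
    IERR Interp1PrimFifthOrderHCWENOChar(fI, fC, u, x, 1, d, solver, mpi, 0);
    CHECKERR(ierr);
  }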