#include <basic.h>
#include <arrayfunctions.h>
#include <mathfunctions.h>
#include <interpolation.h>
#include <tridiagLU.h>
#include <mpivars.h>
#include <hypar.h>

#undef  _MINIMUM_GHOSTS_
#define _MINIMUM_GHOSTS_ 3
/* 5th order hybrid compact-WENO reconstruction (component-wise) on a
   uniform grid; the casts below follow the usual HyPar pattern */
int Interp1PrimFifthOrderHCWENO( double *fI, double *fC, double *u, double *x,
                                 int upw, int dir, void *s, void *m, int uflag )
{
  HyPar          *solver  = (HyPar*)          s;
  MPIVariables   *mpi     = (MPIVariables*)   m;
  CompactScheme  *compact = (CompactScheme*)  solver->compact;
  WENOParameters *weno    = (WENOParameters*) solver->interp;
  TridiagLU      *lu      = (TridiagLU*)      solver->lusolver;
  int            sys, Nsys, d, v;
  int ghosts = solver->ghosts;
  int ndims  = solver->ndims;
  int nvars  = solver->nvars;
  int *dim   = solver->dim_local; /* local (non-ghost) grid size */
  static const double one_half  = 1.0/2.0;
  static const double one_third = 1.0/3.0;
  static const double one_sixth = 1.0/6.0;
  /* select the precomputed WENO weights for this upwind direction
     (upw) and for solution vs. flux interpolation (uflag) */
  double *ww1, *ww2, *ww3;
  ww1 = weno->w1 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
  ww2 = weno->w2 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
  ww3 = weno->w3 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
  int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims];
  _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] =  1;
  _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;

  /* number of 1D grid lines along dir, and total number of
     tridiagonal systems (one per line per solution component) */
  int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer);
  Nsys = N_outer*nvars;
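  /* e.g., for ndims = 3, dim = {64,32,16} and dir = 1:
     bounds_outer = {64,1,16}  -> N_outer = 1024 grid lines along dir,
     bounds_inter = {64,33,16} -> 33 interfaces per grid line          */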
  /* sub-, main and super-diagonals and right-hand side of the
     tridiagonal systems, stored in the CompactScheme object */
  double *A = compact->A;
  double *B = compact->B;
  double *C = compact->C;
  double *R = compact->R;
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI)
  for (sys=0; sys < N_outer; sys++) {
    _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
    _ArrayCopy1D_(index_outer,indexC,ndims);
    _ArrayCopy1D_(index_outer,indexI,ndims);
    for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
      int qm1, qm2, qm3, qp1, qp2, p;
      _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
      /* 1D indices of the stencil points; for upw < 0 the stencil is
         the mirror image, so the rest of the loop body can be written
         as if the stencil were left-biased */
      if (upw > 0) {
        indexC[dir] = indexI[dir]-3; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3);
        indexC[dir] = indexI[dir]-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
        indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        indexC[dir] = indexI[dir]  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
        indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
      } else {
        indexC[dir] = indexI[dir]+2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3);
        indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
        indexC[dir] = indexI[dir]  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
        indexC[dir] = indexI[dir]-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
      }
      for (v=0; v<nvars; v++) {
        double fm3, fm2, fm1, fp1, fp2;
        fm3 = fC[qm3*nvars+v];
        fm2 = fC[qm2*nvars+v];
        fm1 = fC[qm1*nvars+v];
        fp1 = fC[qp1*nvars+v];
        fp2 = fC[qp2*nvars+v];
        /* the three 3rd order candidate interpolations */
        double f1, f2, f3;
        f1 = (2*one_sixth)*fm3 - (7.0*one_sixth)*fm2 + (11.0*one_sixth)*fm1;
        f2 = (-one_sixth)*fm2 + (5.0*one_sixth)*fm1 + (2*one_sixth)*fp1;
        f3 = (2*one_sixth)*fm1 + (5.0*one_sixth)*fp1 - (one_sixth)*fp2;
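        /* with the ideal weights (w1,w2,w3) = (0.1,0.6,0.3), the
           combination w1*f1 + w2*f2 + w3*f3 reduces to the standard
           5th order upwind interpolation; the nonlinear weights read
           below deviate from these values near discontinuities */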
        /* WENO weights, precomputed and read off the weno object */
        double w1, w2, w3;
        w1 = *(ww1+p*nvars+v);
        w2 = *(ww2+p*nvars+v);
        w3 = *(ww3+p*nvars+v);
        /* hybridization parameter sigma: pure WENO5 at the physical
           domain boundaries, smoothness-based blend in the interior */
        double sigma;
        if ( ((mpi->ip[dir] == 0                ) && (indexI[dir] == 0       ))
          || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
          sigma = 0.0;
        } else {
          /* smoothness sensor: ratios of consecutive first differences
             of the solution, regularized by cuckoo */
          double cuckoo, df_jm12, df_jp12, df_jp32, r_j, r_jp1, r_int;
          cuckoo  = (0.9*weno->rc / (1.0-0.9*weno->rc)) * weno->xi * weno->xi;
          df_jm12 = fm1 - fm2;
          df_jp12 = fp1 - fm1;
          df_jp32 = fp2 - fp1;
          r_j   = (absolute(2*df_jp12*df_jm12)+cuckoo) / (df_jp12*df_jp12+df_jm12*df_jm12+cuckoo);
          r_jp1 = (absolute(2*df_jp32*df_jp12)+cuckoo) / (df_jp32*df_jp32+df_jp12*df_jp12+cuckoo);
          r_int = min(r_j, r_jp1);
          sigma = min((r_int/weno->rc), 1.0);
        }
        /* tridiagonal system coefficients (compact part scaled by sigma) */
        if (upw > 0) {
          A[sys*nvars+v+Nsys*indexI[dir]] = one_half  * sigma;
          B[sys*nvars+v+Nsys*indexI[dir]] = 1.0;
          C[sys*nvars+v+Nsys*indexI[dir]] = one_sixth * sigma;
        } else {
          C[sys*nvars+v+Nsys*indexI[dir]] = one_half  * sigma;
          B[sys*nvars+v+Nsys*indexI[dir]] = 1.0;
          A[sys*nvars+v+Nsys*indexI[dir]] = one_sixth * sigma;
        }
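        /* for upw > 0, each interior row of the system reads
             (sigma/2) fI_{j-1/2} + fI_{j+1/2} + (sigma/6) fI_{j+3/2} = R,
           so sigma = 1 recovers the 5th order compact upwind scheme and
           sigma = 0 reduces the row to the explicit WENO5 value */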
        /* right-hand side: blend of the compact and WENO interpolations */
        double fWENO, fCompact;
        fWENO    = w1*f1 + w2*f2 + w3*f3;
        fCompact = one_sixth * (one_third*fm2 + 19.0*one_third*fm1 + 10.0*one_third*fp1);
        R[sys*nvars+v+Nsys*indexI[dir]] = sigma*fCompact + (1.0-sigma)*fWENO;
      }
    }
  }

  /* solve the tridiagonal systems; every rank except the last one
     solves without its last interface, which overlaps with the first
     interface of the next rank along dir */
  if (mpi->ip[dir] != mpi->iproc[dir]-1) tridiagLU(A,B,C,R,dim[dir]  ,Nsys,lu,&mpi->comm[dir]);
  else                                   tridiagLU(A,B,C,R,dim[dir]+1,Nsys,lu,&mpi->comm[dir]);
  /* fill in the solution at the last interface of each rank: it is the
     first interface of the next rank along dir */
  double *sendbuf = compact->sendbuf;
  double *recvbuf = compact->recvbuf;
  MPI_Request req[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};
  if (mpi->ip[dir])                      for (d=0; d<Nsys; d++) sendbuf[d] = R[d];
  if (mpi->ip[dir] != mpi->iproc[dir]-1) MPI_Irecv(recvbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]+1,214,mpi->comm[dir],&req[0]);
  if (mpi->ip[dir])                      MPI_Isend(sendbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]-1,214,mpi->comm[dir],&req[1]);
  MPI_Status status_arr[2];
  MPI_Waitall(2,&req[0],status_arr);
  if (mpi->ip[dir] != mpi->iproc[dir]-1) for (d=0; d<Nsys; d++) R[d+Nsys*dim[dir]] = recvbuf[d];
  /* copy the solution to the interface array fI */
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI)
  for (sys=0; sys < N_outer; sys++) {
    _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
    _ArrayCopy1D_(index_outer,indexI,ndims);
    for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
      int p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
      _ArrayCopy1D_((R+sys*nvars+Nsys*indexI[dir]),(fI+nvars*p),nvars);
    }
  }

  return(0);
}
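The hybridization indicator is easiest to see in isolation: for smooth data
the consecutive first differences nearly agree, r_int approaches 1, and sigma
saturates at 1 (pure compact interpolation); across a jump the differences
disagree, r_int collapses toward 0, and the scheme falls back to WENO5. A
minimal standalone sketch of this behavior, where sigma_indicator is a
hypothetical helper and the rc and xi values are sample stand-ins for
weno->rc and weno->xi:

#include <stdio.h>
#include <math.h>

/* standalone sketch of the hybridization indicator used above;
   rc and xi are stand-ins for weno->rc and weno->xi */
static double sigma_indicator(double fm2, double fm1, double fp1, double fp2,
                              double rc, double xi)
{
  double cuckoo  = (0.9*rc / (1.0-0.9*rc)) * xi * xi;
  double df_jm12 = fm1 - fm2, df_jp12 = fp1 - fm1, df_jp32 = fp2 - fp1;
  double r_j   = (fabs(2*df_jp12*df_jm12)+cuckoo)/(df_jp12*df_jp12+df_jm12*df_jm12+cuckoo);
  double r_jp1 = (fabs(2*df_jp32*df_jp12)+cuckoo)/(df_jp32*df_jp32+df_jp12*df_jp12+cuckoo);
  double r_int = (r_j < r_jp1 ? r_j : r_jp1);
  double s     = r_int / rc;
  return (s < 1.0 ? s : 1.0);
}

int main(void)
{
  /* smooth data: consecutive differences nearly equal, sigma = 1 */
  printf("smooth: sigma = %f\n", sigma_indicator(1.0, 1.1, 1.2, 1.3, 0.3, 0.001));
  /* a jump between fm1 and fp1: differences disagree, sigma -> 0 */
  printf("jump:   sigma = %f\n", sigma_indicator(1.0, 1.0, 5.0, 5.0, 0.3, 0.001));
  return 0;
}

Compiled with cc -lm, the first call prints sigma = 1 and the second a value
near zero, i.e., pure compact on smooth data and pure WENO5 at the jump.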