/* Minimum number of ghost cells required by this scheme: the stencil
   reaches 3 cells to one side of an interface (see the indexI[dir]-3
   offset used for qm3 below in this file).
   NOTE(review): identifiers beginning with an underscore followed by an
   uppercase letter are reserved to the implementation (C11 7.1.3); the
   name is kept unchanged for compatibility with the rest of the codebase. */
#undef _MINIMUM_GHOSTS_
#define _MINIMUM_GHOSTS_ 3
/* NOTE(review): this chunk is an extraction-mangled interior of a
   characteristic-based 5th-order compact upwind interpolation routine.
   Statements are hard-wrapped mid-expression and each carries its
   original source line number; the code below is kept byte-identical. */
/* Cache solver dimensions in locals: number of ghost cells, number of
   spatial dimensions, and number of solution components per grid point. */
109 int ghosts = solver->
ghosts;
110 int ndims = solver->
ndims;
111 int nvars = solver->
nvars;
/* Stencil weights of the 5th-order compact upwind scheme as exact
   rationals: (3/10, 6/10, 1/10) are the implicit LHS weights,
   (1/30, 19/30, ...) the interior RHS weights, and
   (1/30, -13/60, 47/60, 27/60, -1/20) the explicit boundary-closure
   RHS weights.  NOTE(review): original line 120 is missing from this
   view — presumably one more RHS coefficient declared there. */
115 static const double three_by_ten = 3.0/10.0,
116 six_by_ten = 6.0/10.0,
117 one_by_ten = 1.0/10.0,
118 one_by_thirty = 1.0/30.0,
119 nineteen_by_thirty = 19.0/30.0,
121 thirteen_by_sixty = 13.0/60.0,
122 fortyseven_by_sixty = 47.0/60.0,
123 twentyseven_by_sixty = 27.0/60.0,
124 one_by_twenty = 1.0/20.0;
/* Multidimensional index scratch (VLAs sized by ndims/nvars):
   indexC — cell-centered index, indexI — interface index.
   bounds_outer: domain extents with the interpolation direction
   collapsed to 1 (iterates the independent 1D lines);
   bounds_inter: extents with one extra point in `dir` (interfaces
   outnumber cells by one along that direction). */
128 int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims];
129 _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1;
130 _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;
/* Per-interface eigenvector scratch: R/L are nvars x nvars right/left
   eigenvector matrices, uavg the averaged state they are evaluated at. */
136 double R[nvars*nvars], L[nvars*nvars], uavg[nvars];
/* Block-tridiagonal system storage held by the compact-scheme object:
   A/B/C are the sub/main/super diagonal blocks.  NOTE: the RHS pointer
   F deliberately aliases the member named `R` of the compact struct
   (distinct from the local eigenvector array R above). */
139 double *A = compact->
A;
140 double *B = compact->
B;
141 double *C = compact->
C;
142 double *F = compact->
R;
/* First pass: assemble the block-tridiagonal system, one independent
   1D line of interfaces per `sys` iteration; the index arrays and
   eigenvector scratch are thread-private. */
144 #pragma omp parallel for schedule(auto) default(shared) private(sys,d,v,k,R,L,uavg,index_outer,indexC,indexI)
145 for (sys=0; sys<Nsys; sys++) {
/* NOTE(review): the initialization of index_outer/indexC/indexI from
   `sys` (original lines 146-148) is not visible in this chunk. */
/* Loop over the dim[dir]+1 interfaces along direction dir. */
149 for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
150 int qm1,qm2,qm3,qp1,qp2;
/* Left-biased stencil for interface i: 1D offsets (incl. ghosts) of
   cells i-3 .. i+1. */
152 indexC[dir] = indexI[dir]-3;
_ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3);
153 indexC[dir] = indexI[dir]-2;
_ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
154 indexC[dir] = indexI[dir]-1;
_ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
155 indexC[dir] = indexI[dir] ;
_ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
156 indexC[dir] = indexI[dir]+1;
_ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
/* Mirrored, right-biased stencil: cells i+2 .. i-2 assigned to the
   same q* names.  NOTE(review): the conditional selecting between the
   two orientations — presumably on the upwinding direction (original
   lines 151/157) — is not visible in this chunk. */
158 indexC[dir] = indexI[dir]+2;
_ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3);
159 indexC[dir] = indexI[dir]+1;
_ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
160 indexC[dir] = indexI[dir] ;
_ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
161 indexC[dir] = indexI[dir]-1;
_ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
162 indexC[dir] = indexI[dir]-2;
_ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
/* For each characteristic field v: project the cell-centered flux fC
   onto characteristic space (row v of the left-eigenvector matrix L)
   at the five stencil cells, then fill row v of this interface's
   block-tridiagonal equation. */
175 for (v=0; v<nvars; v++) {
178 double fm3, fm2, fm1, fp1, fp2;
179 fm3 = fm2 = fm1 = fp1 = fp2 = 0;
/* fm3..fp2 = (L * fC) at the stencil cells — dot product of the
   eigenvector row with each cell's flux vector.
   NOTE(review): the closing brace of this k-loop (original ~186-187)
   is not visible in this chunk. */
180 for (k = 0; k < nvars; k++) {
181 fm3 += L[v*nvars+k] * fC[qm3*nvars+k];
182 fm2 += L[v*nvars+k] * fC[qm2*nvars+k];
183 fm1 += L[v*nvars+k] * fC[qm1*nvars+k];
184 fp1 += L[v*nvars+k] * fC[qp1*nvars+k];
185 fp2 += L[v*nvars+k] * fC[qp2*nvars+k];
/* Physical-boundary closure: at the first interface of the first MPI
   rank, or the last interface of the last rank along `dir`, decouple
   the row (A = C = 0, B = row of L) and put the explicit 5th-order
   upwind flux on the RHS. */
188 if ( ((mpi->
ip[dir] == 0 ) && (indexI[dir] == 0 ))
189 || ((mpi->
ip[dir] == mpi->
iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
191 for (k=0; k<nvars; k++) {
192 A[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = 0.0;
193 C[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = 0.0;
194 B[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = L[v*nvars+k];
/* Explicit boundary RHS: 1/30 f(i-3) - 13/60 f(i-2) + 47/60 f(i-1)
   + 27/60 f(i) - 1/20 f(i+1) in characteristic space. */
196 F[(Nsys*indexI[dir]+sys)*nvars+v] = one_by_thirty * fm3
197 - thirteen_by_sixty * fm2
198 + fortyseven_by_sixty * fm1
199 + twentyseven_by_sixty * fp1
200 - one_by_twenty * fp2;
/* Interior rows: compact LHS weights 3/10, 6/10, 1/10 scale the
   eigenvector row into the three diagonal blocks.  The two branches
   below are mirror images (A and C swapped), presumably selected by
   the upwinding direction — the enclosing else/if lines (originals
   ~201-203, 208-209) are not visible in this chunk. */
204 for (k=0; k<nvars; k++) {
205 A[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = three_by_ten * L[v*nvars+k];
206 B[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = six_by_ten * L[v*nvars+k];
207 C[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = one_by_ten * L[v*nvars+k];
210 for (k=0; k<nvars; k++) {
211 C[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = three_by_ten * L[v*nvars+k];
212 B[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = six_by_ten * L[v*nvars+k];
213 A[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = one_by_ten * L[v*nvars+k];
/* Interior RHS starts 1/30 f(i-2) + 19/30 f(i-1) + ... — the
   continuation past original line 217 is not visible here. */
216 F[(Nsys*indexI[dir]+sys)*nvars+v] = one_by_thirty * fm2
217 + nineteen_by_thirty * fm1
/* Guard on all ranks except the last along `dir`.
   NOTE(review): the body this brace opens (original lines 234-239)
   is not visible in this chunk. */
233 if (mpi->
ip[dir] != mpi->
iproc[dir]-1) {
/* Neighbour exchange of the RHS: every non-leftmost rank sends its
   first interface's Nsys*nvars RHS entries to the left neighbour
   (tag 214); every non-rightmost rank receives the matching data from
   its right neighbour and appends it as the extra (dim[dir]-th)
   interface's RHS. */
240 double *sendbuf = compact->
sendbuf;
241 double *recvbuf = compact->
recvbuf;
/* Unused request slots stay MPI_REQUEST_NULL so MPI_Waitall below is
   safe on every rank regardless of which transfers were posted. */
242 MPI_Request req[2] = {MPI_REQUEST_NULL,MPI_REQUEST_NULL};
243 if (mpi->
ip[dir])
for (d=0; d<Nsys*nvars; d++) sendbuf[d] = F[d];
244 if (mpi->
ip[dir] != mpi->
iproc[dir]-1) MPI_Irecv(recvbuf,Nsys*nvars,MPI_DOUBLE,mpi->
ip[dir]+1,214,mpi->
comm[dir],&req[0]);
245 if (mpi->
ip[dir]) MPI_Isend(sendbuf,Nsys*nvars,MPI_DOUBLE,mpi->
ip[dir]-1,214,mpi->
comm[dir],&req[1]);
246 MPI_Status status_arr[2];
247 MPI_Waitall(2,&req[0],status_arr);
/* Store the received neighbour data as this rank's extra interface. */
248 if (mpi->
ip[dir] != mpi->
iproc[dir]-1)
for (d=0; d<Nsys*nvars; d++) F[d+Nsys*nvars*dim[dir]] = recvbuf[d];
/* Second pass: after the block-tridiagonal solve (not visible in this
   chunk), copy each interface's nvars solution values out of F into
   the interface array fI.  NOTE(review): the destination offset `p`
   is computed on a line (original ~258) not visible here — presumably
   an _ArrayIndex1D_ over the interface index. */
253 #pragma omp parallel for schedule(auto) default(shared) private(sys,d,v,k,R,L,uavg,index_outer,indexC,indexI)
254 for (sys=0; sys<Nsys; sys++) {
257 for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
259 _ArrayCopy1D_((F+sys*nvars+Nsys*nvars*indexI[dir]),(fI+nvars*p),nvars);
Header file for TridiagLU.
Definitions for the functions computing the interpolated value of the primitive at the cell interface from the cell-centered values.
#define _ArrayIndexnD_(N, index, imax, i, ghost)
int(* GetRightEigenvectors)(double *, double *, void *, int)
int Interp1PrimFifthOrderCompactUpwindChar(double *, double *, double *, double *, int, int, void *, void *, int)
5th order compact upwind reconstruction (characteristic-based) on a uniform grid
MPI related function definitions.
int blocktridiagLU(double *, double *, double *, double *, int, int, int, void *, void *)
Structure of variables/parameters needed by the compact schemes.
#define _ArrayIndex1D_(N, imax, i, ghost, index)
int(* AveragingFunction)(double *, double *, double *, void *)
Contains function definitions for common mathematical functions.
int(* GetLeftEigenvectors)(double *, double *, void *, int)
#define _ArrayCopy1D_(x, y, size)
Contains structure definition for hypar.
Some basic definitions and macros.
Contains macros and function definitions for common array operations.
#define _ArrayProduct1D_(x, size, p)
Structure of MPI-related variables.
Structure containing all solver-specific variables and functions.
Contains macros and function definitions for common matrix multiplication.