/* This scheme needs at least 3 ghost points */
#undef  _MINIMUM_GHOSTS_
#define _MINIMUM_GHOSTS_ 3

  int ghosts = solver->ghosts;
  int ndims  = solver->ndims;
  int nvars  = solver->nvars;
  static const double one_third            = 1.0/3.0,
                      thirteen_by_sixty    = 13.0/60.0,
                      fortyseven_by_sixty  = 47.0/60.0,
                      twentyseven_by_sixty = 27.0/60.0,
                      one_by_twenty        = 1.0/20.0,
                      one_by_thirty        = 1.0/30.0,
                      nineteen_by_thirty   = 19.0/30.0,
                      three_by_ten         = 3.0/10.0,
                      six_by_ten           = 6.0/10.0,
                      one_by_ten           = 1.0/10.0;
  int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims];
  _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] =  1;
  _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;
  /* Tridiagonal system for the interface values: diagonals A, B, C and right-hand side R */
  double *A = compact->A;
  double *B = compact->B;
  double *C = compact->C;
  double *R = compact->R;
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI)
  for (sys=0; sys < N_outer; sys++) {
    for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
      int qm1,qm2,qm3,qp1,qp2,p;
      if (upw > 0) {
        /* left-biased stencil */
        indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        qm3 = qm1 - 2*stride[dir];
        qm2 = qm1 -   stride[dir];
        qp1 = qm1 +   stride[dir];
        qp2 = qm1 + 2*stride[dir];
      } else {
        /* right-biased stencil (mirror image) */
        indexC[dir] = indexI[dir]  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        qm3 = qm1 + 2*stride[dir];
        qm2 = qm1 +   stride[dir];
        qp1 = qm1 -   stride[dir];
        qp2 = qm1 - 2*stride[dir];
      }
      /* Pointers to the stencil values in the cell-centered array fC */
      double *fm3, *fm2, *fm1, *fp1, *fp2;
      fm3 = fC + qm3*nvars;
      fm2 = fC + qm2*nvars;
      fm1 = fC + qm1*nvars;
      fp1 = fC + qp1*nvars;
      fp2 = fC + qp2*nvars;

      if (   ((mpi->ip[dir] == 0                ) && (indexI[dir] == 0       ))
          || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
        /* Physical boundary interface: explicit 5th-order upwind right-hand side
           (the corresponding rows of A, B, C are set in code omitted from this excerpt) */
        for (v=0; v<nvars; v++) {
          (R+Nsys*indexI[dir]+sys*nvars)[v] =   one_by_thirty        * fm3[v]
                                              - thirteen_by_sixty    * fm2[v]
                                              + fortyseven_by_sixty  * fm1[v]
                                              + twentyseven_by_sixty * fp1[v]
                                              - one_by_twenty        * fp2[v];
        }
      } else {
        /* Interior interface: right-hand side of the compact scheme
           (the A, B, C entries are likewise omitted from this excerpt) */
        for (v=0; v<nvars; v++) {
          (R+Nsys*indexI[dir]+sys*nvars)[v] =   one_by_thirty      * fm2[v]
                                              + nineteen_by_thirty * fm1[v]
                                              + one_third          * fp1[v];
        }
      }
  /* An interface on a processor boundary is computed by both ranks that share it: each rank
     (except the first) sends its first-interface right-hand side to the left neighbor, which
     overwrites its own last-interface right-hand side with it */
  double *sendbuf = compact->sendbuf;
  double *recvbuf = compact->recvbuf;
  MPI_Request req[2] = {MPI_REQUEST_NULL,MPI_REQUEST_NULL};
  if (mpi->ip[dir]) for (d=0; d<Nsys; d++) sendbuf[d] = R[d];
  if (mpi->ip[dir] != mpi->iproc[dir]-1) MPI_Irecv(recvbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]+1,214,mpi->comm[dir],&req[0]);
  if (mpi->ip[dir])                      MPI_Isend(sendbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]-1,214,mpi->comm[dir],&req[1]);
  MPI_Status status_arr[2];
  MPI_Waitall(2,&req[0],status_arr);
  if (mpi->ip[dir] != mpi->iproc[dir]-1) for (d=0; d<Nsys; d++) R[d+Nsys*dim[dir]] = recvbuf[d];
  /* After the tridiagonal solve (not shown), copy the solution from R into the interface array fI */
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI)
  for (sys=0; sys < N_outer; sys++) {
    for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
      int p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
      _ArrayCopy1D_((R+sys*nvars+Nsys*indexI[dir]),(fI+nvars*p),nvars);
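
The hard-coded fractions above encode the scheme itself. Assuming the declared constants three_by_ten, six_by_ten, and one_by_ten fill the sub-, main, and super-diagonals A, B, C (their assignment is not part of this excerpt), an interior row of the system for upw > 0 is the 5th-order compact upwind relation and a boundary row is the explicit 5th-order upwind formula; in LaTeX notation,

    \frac{3}{10}\hat{f}_{j-1/2} + \frac{6}{10}\hat{f}_{j+1/2} + \frac{1}{10}\hat{f}_{j+3/2}
        = \frac{1}{30}f_{j-1} + \frac{19}{30}f_{j} + \frac{1}{3}f_{j+1},

    \hat{f}_{j+1/2} = \frac{1}{30}f_{j-2} - \frac{13}{60}f_{j-1} + \frac{47}{60}f_{j}
        + \frac{27}{60}f_{j+1} - \frac{1}{20}f_{j+2},

where f_j is the cell-centered value at cell j (fm1 in the code, with fm2 = f_{j-1}, fp1 = f_{j+1}, and so on) and \hat{f}_{j+1/2} is the interface value being reconstructed. In both formulas the coefficients sum to 1, so constant data are reconstructed exactly.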
int Interp1PrimFifthOrderCompactUpwind(double *fI, double *fC, double *u, double *x, int upw, int dir, void *s, void *m, int uflag)
    5th order compact upwind reconstruction (component-wise) on a uniform grid.

References: the array macros _ArrayIndexnD_, _ArrayIndex1D_, _ArrayCopy1D_, _ArraySetValue_, _ArrayProduct1D_, and the tridiagonal solver
    int tridiagLU(double *, double *, double *, double *, int, int, void *, void *)
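
Between assembling R and copying it back into fI, the batched tridiagonal systems are solved with tridiagLU() (prototype above). As a point of reference only, not the TridiagLU implementation, the serial core of such a solve is the Thomas algorithm; the sketch below (with a hypothetical helper thomas_solve, one scalar system) assumes boundary rows reduce to identity rows and interior rows carry the declared 3/10, 6/10, 1/10 coefficients, and shows that a constant right-hand side yields a constant solution.

    #include <stdio.h>

    /* Thomas algorithm for a single tridiagonal system (illustration only; tridiagLU
       solves many interleaved systems and handles the parallel partitioning).
       a = subdiagonal, b = diagonal, c = superdiagonal, r = RHS, overwritten with the solution. */
    static void thomas_solve(double *a, double *b, double *c, double *r, int n)
    {
      int i;
      for (i = 1; i < n; i++) {               /* forward elimination */
        double m = a[i] / b[i-1];
        b[i] -= m * c[i-1];
        r[i] -= m * r[i-1];
      }
      r[n-1] /= b[n-1];                       /* back substitution */
      for (i = n-2; i >= 0; i--) r[i] = (r[i] - c[i]*r[i+1]) / b[i];
    }

    int main(void)
    {
      /* Assumed row pattern: identity rows at the two boundaries,
         compact coefficients 3/10, 6/10, 1/10 in the interior. */
      double a[5] = {0.0, 0.3, 0.3, 0.3, 0.0};
      double b[5] = {1.0, 0.6, 0.6, 0.6, 1.0};
      double c[5] = {0.0, 0.1, 0.1, 0.1, 0.0};
      double r[5] = {1.0, 1.0, 1.0, 1.0, 1.0};  /* constant data: RHS coefficients also sum to 1 */
      thomas_solve(a, b, c, r, 5);
      for (int i = 0; i < 5; i++) printf("fI[%d] = %f\n", i, r[i]);   /* all 1.0 */
      return 0;
    }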