19 #undef _MINIMUM_GHOSTS_ 24 #define _MINIMUM_GHOSTS_ 3 98 int ghosts = solver->
ghosts;
99 int ndims = solver->
ndims;
100 int nvars = solver->
nvars;
105 static const double one_third = 1.0/3.0;
106 static const double one_sixth = 1.0/6.0;
108 double *ww1, *ww2, *ww3;
109 ww1 = weno->w1 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
110 ww2 = weno->w2 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
111 ww3 = weno->w3 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
115 int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims];
116 _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1;
117 _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;
124 double *A = compact->
A;
125 double *B = compact->
B;
126 double *C = compact->
C;
127 double *R = compact->
R;
129 #pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI) 130 for (sys=0; sys < N_outer; sys++) {
134 for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
135 int qm1,qm2,qm3,qp1,qp2,p;
138 indexC[dir] = indexI[dir]-1;
_ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
139 qm3 = qm1 - 2*stride[dir];
140 qm2 = qm1 - stride[dir];
141 qp1 = qm1 + stride[dir];
142 qp2 = qm1 + 2*stride[dir];
144 indexC[dir] = indexI[dir] ;
_ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
145 qm3 = qm1 + 2*stride[dir];
146 qm2 = qm1 + stride[dir];
147 qp1 = qm1 - stride[dir];
148 qp2 = qm1 - 2*stride[dir];
152 double *fm3, *fm2, *fm1, *fp1, *fp2;
160 double f1[nvars], f2[nvars], f3[nvars];
161 if ( ((mpi->
ip[dir] == 0 ) && (indexI[dir] == 0 ))
162 || ((mpi->
ip[dir] == mpi->
iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
164 _ArrayAXBYCZ_(f1,(2*one_sixth),fm3,(-7*one_sixth) ,fm2,(11*one_sixth) ,fm1,nvars);
165 _ArrayAXBYCZ_(f2,(-one_sixth) ,fm2,(5*one_sixth) ,fm1,(2*one_sixth) ,fp1,nvars);
166 _ArrayAXBYCZ_(f3,(2*one_sixth),fm1,(5*one_sixth) ,fp1,(-one_sixth) ,fp2,nvars);
169 _ArrayAXBY_(f1,(one_sixth) ,fm2,(5*one_sixth),fm1,nvars);
170 _ArrayAXBY_(f2,(5*one_sixth),fm1,(one_sixth) ,fp1,nvars);
171 _ArrayAXBY_(f3,(one_sixth) ,fm1,(5*one_sixth),fp1,nvars);
175 double *w1, *w2, *w3;
180 if ( ((mpi->
ip[dir] == 0 ) && (indexI[dir] == 0 ))
181 || ((mpi->
ip[dir] == mpi->
iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
187 _ArrayAXBY_ ((A+Nsys*indexI[dir]+sys*nvars),(2*one_third) ,w1,(one_third) ,w2,nvars);
188 _ArrayAXBYCZ_ ((B+Nsys*indexI[dir]+sys*nvars),(one_third) ,w1,(2*one_third),w2,(2*one_third),w3,nvars);
191 _ArrayAXBY_ ((C+Nsys*indexI[dir]+sys*nvars),(2*one_third) ,w1,(one_third) ,w2,nvars);
192 _ArrayAXBYCZ_ ((B+Nsys*indexI[dir]+sys*nvars),(one_third) ,w1,(2*one_third),w2,(2*one_third),w3,nvars);
213 double *sendbuf = compact->
sendbuf;
214 double *recvbuf = compact->
recvbuf;
215 MPI_Request req[2] = {MPI_REQUEST_NULL,MPI_REQUEST_NULL};
216 if (mpi->
ip[dir])
for (d=0; d<Nsys; d++) sendbuf[d] = R[d];
217 if (mpi->
ip[dir] != mpi->
iproc[dir]-1) MPI_Irecv(recvbuf,Nsys,MPI_DOUBLE,mpi->
ip[dir]+1,214,mpi->
comm[dir],&req[0]);
218 if (mpi->
ip[dir]) MPI_Isend(sendbuf,Nsys,MPI_DOUBLE,mpi->
ip[dir]-1,214,mpi->
comm[dir],&req[1]);
219 MPI_Status status_arr[2];
220 MPI_Waitall(2,&req[0],status_arr);
221 if (mpi->
ip[dir] != mpi->
iproc[dir]-1)
for (d=0; d<Nsys; d++) R[d+Nsys*dim[dir]] = recvbuf[d];
226 #pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI) 227 for (sys=0; sys < N_outer; sys++) {
230 for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
232 _ArrayCopy1D_((R+sys*nvars+Nsys*indexI[dir]),(fI+nvars*p),nvars);
int Interp1PrimFifthOrderCRWENO(double *fI, double *fC, double *u, double *x, int upw, int dir, void *s, void *m, int uflag)
5th order CRWENO reconstruction (component-wise) on a uniform grid
MPI-related function definitions.
Contains function definitions for common mathematical functions.
#define _ArrayAXBY_(z, a, x, b, y, size)
Header file for TridiagLU.
Some basic definitions and macros.
#define _ArrayIndexnD_(N, index, imax, i, ghost)
#define _ArrayScaleCopy1D_(x, a, y, size)
Structure containing all solver-specific variables and functions.
#define _ArrayAXBYCZ_(w, a, x, b, y, c, z, size)
Contains structure definition for hypar.
#define _ArrayIndex1D_(N, imax, i, ghost, index)
Structure of variables/parameters needed by the WENO-type scheme.
Structure of variables/parameters needed by the compact schemes.
#define _ArraySetValue_(x, size, value)
int tridiagLU(double *, double *, double *, double *, int, int, void *, void *)
Structure of MPI-related variables.
Definitions for the functions computing the interpolated value of the primitive variables at the cell interfaces from the cell-centered values.
#define _ArrayCopy1D_(x, y, size)
Contains macros and function definitions for common array operations.
#define _ArrayProduct1D_(x, size, p)
#define _ArrayMultiply3Add1D_(x, a, b, c, d, e, f, size)