HyPar  1.0
Finite-Difference Hyperbolic-Parabolic PDE Solver on Cartesian Grids
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
Interp1PrimFifthOrderCRWENO.c
Go to the documentation of this file.
1 
6 #include <stdio.h>
7 #include <basic.h>
8 #include <arrayfunctions.h>
9 #include <mathfunctions.h>
10 #include <interpolation.h>
11 #include <tridiagLU.h>
12 #include <mpivars.h>
13 #include <hypar.h>
14 
15 #ifdef with_omp
16 #include <omp.h>
17 #endif
18 
19 #undef _MINIMUM_GHOSTS_
20 
24 #define _MINIMUM_GHOSTS_ 3
25 
/* NOTE(review): this is a Doxygen rendering; original file lines 25-78 (the
 * Doxygen comment block and the opening line `int Interp1PrimFifthOrderCRWENO(`)
 * and line 96 (presumably `_DECLARE_IERR_;`, since IERR/CHECKERR(ierr) are used
 * below) are elided from this view.
 *
 * 5th-order CRWENO reconstruction (component-wise) of the interface values fI
 * from the cell-centered values fC along dimension `dir` (see the declaration
 * `int Interp1PrimFifthOrderCRWENO(...)` in the file index below).
 *
 * Parameters (as visible in this body):
 *   fI    - output: interpolated values at the (dim[dir]+1) cell interfaces
 *   fC    - input: cell-centered function values (with ghost points)
 *   u     - solution array; not referenced in this body (only uflag selects
 *           the precomputed-weight set) -- TODO confirm against elided docs
 *   x     - grid coordinates; not referenced in this visible body
 *   upw   - upwinding direction: > 0 biases the stencil left, otherwise right
 *   dir   - spatial dimension along which to interpolate
 *   s     - HyPar solver object (opaque void*)
 *   m     - MPIVariables object (opaque void*)
 *   uflag - nonzero selects the weight set computed on u rather than fC
 */
79  double *fI,
80  double *fC,
81  double *u,
82  double *x,
83  int upw,
84  int dir,
85  void *s,
86  void *m,
87  int uflag
88  )
89 {
 /* recover typed handles from the opaque solver/MPI arguments */
90  HyPar *solver = (HyPar*) s;
91  MPIVariables *mpi = (MPIVariables*) m;
92  CompactScheme *compact= (CompactScheme*) solver->compact;
93  WENOParameters *weno = (WENOParameters*) solver->interp;
94  TridiagLU *lu = (TridiagLU*) solver->lusolver;
95  int sys,Nsys,d;
97 
98  int ghosts = solver->ghosts;
99  int ndims = solver->ndims;
100  int nvars = solver->nvars;
101  int *dim = solver->dim_local;
102  int *stride= solver->stride_with_ghosts;
103 
104  /* define some constants */
105  static const double one_third = 1.0/3.0;
106  static const double one_sixth = 1.0/6.0;
107 
 /* Select the precomputed nonlinear WENO weights for this sweep: the
  * weno->w1/w2/w3 buffers hold four weight sets laid out contiguously,
  * indexed by upwind sign (2*size block) and uflag (size block), plus a
  * per-dimension offset. The weights themselves were computed elsewhere. */
108  double *ww1, *ww2, *ww3;
109  ww1 = weno->w1 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
110  ww2 = weno->w2 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
111  ww3 = weno->w3 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
112 
113  /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along
114  dimension "dir" */
115  int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims];
116  _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1;
117  _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;
118  int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer);
119 
120  /* calculate total number of tridiagonal systems to solve */
 /* one tridiagonal system per 1D line per solution component */
121  _ArrayProduct1D_(bounds_outer,ndims,Nsys); Nsys *= nvars;
122 
123  /* Allocate arrays for tridiagonal system */
 /* A/B/C are the sub-/main-/super-diagonals, R the right-hand side; the
  * buffers are preallocated in the CompactScheme object, so this function
  * does no heap allocation */
124  double *A = compact->A;
125  double *B = compact->B;
126  double *C = compact->C;
127  double *R = compact->R;
128 
 /* Assemble the tridiagonal system: loop over all 1D lines (parallelized
  * with OpenMP; each iteration writes disjoint rows of A/B/C/R) and, within
  * each line, over all interfaces 0..dim[dir] */
129 #pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI)
130  for (sys=0; sys < N_outer; sys++) {
131  _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
132  _ArrayCopy1D_(index_outer,indexC,ndims);
133  _ArrayCopy1D_(index_outer,indexI,ndims);
134  for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
 /* qm1 is the 1D offset (with ghosts) of the cell immediately upwind of
  * interface indexI[dir]; qm3..qp2 walk the 5-point stencil in the upwind
  * direction, so the same formulas below serve both upwind signs */
135  int qm1,qm2,qm3,qp1,qp2,p;
136  _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
137  if (upw > 0) {
138  indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
139  qm3 = qm1 - 2*stride[dir];
140  qm2 = qm1 - stride[dir];
141  qp1 = qm1 + stride[dir];
142  qp2 = qm1 + 2*stride[dir];
143  } else {
144  indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
145  qm3 = qm1 + 2*stride[dir];
146  qm2 = qm1 + stride[dir];
147  qp1 = qm1 - stride[dir];
148  qp2 = qm1 - 2*stride[dir];
149  }
150 
151  /* Defining stencil points */
152  double *fm3, *fm2, *fm1, *fp1, *fp2;
153  fm3 = fC+qm3*nvars;
154  fm2 = fC+qm2*nvars;
155  fm1 = fC+qm1*nvars;
156  fp1 = fC+qp1*nvars;
157  fp2 = fC+qp2*nvars;
158 
159  /* Candidate stencils and their optimal weights*/
160  double f1[nvars], f2[nvars], f3[nvars];
 /* Boundary detection: this interface lies on a physical domain boundary
  * only if this MPI rank owns that boundary (ip[dir]==0 for the left end,
  * ip[dir]==iproc[dir]-1 for the right end) AND the interface index is at
  * the corresponding end of the local line */
161  if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 ))
162  || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
163  /* Use WENO5 at the physical boundaries */
 /* explicit 3rd-order candidate reconstructions (standard WENO5 stencils) */
164  _ArrayAXBYCZ_(f1,(2*one_sixth),fm3,(-7*one_sixth) ,fm2,(11*one_sixth) ,fm1,nvars);
165  _ArrayAXBY_(f2,(-one_sixth) ,fm2,(5*one_sixth) ,fm1,(2*one_sixth) ,fp1,nvars);
166  _ArrayAXBYCZ_(f3,(2*one_sixth),fm1,(5*one_sixth) ,fp1,(-one_sixth) ,fp2,nvars);
167  } else {
168  /* CRWENO5 at the interior points */
 /* implicit (compact) 3-point candidate reconstructions; the implicit
  * coupling goes into the tridiagonal A/B/C below */
169  _ArrayAXBY_(f1,(one_sixth) ,fm2,(5*one_sixth),fm1,nvars);
170  _ArrayAXBY_(f2,(5*one_sixth),fm1,(one_sixth) ,fp1,nvars);
171  _ArrayAXBY_(f3,(one_sixth) ,fm1,(5*one_sixth),fp1,nvars);
172  }
173 
174  /* retrieve the WENO weights */
175  double *w1, *w2, *w3;
176  w1 = (ww1+p*nvars);
177  w2 = (ww2+p*nvars);
178  w3 = (ww3+p*nvars);
179 
 /* Fill row (interface indexI[dir], system sys) of the tridiagonal matrix.
  * Rows are stored interface-major: row base = Nsys*indexI[dir]+sys*nvars. */
180  if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 ))
181  || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
 /* boundary rows are identity: the WENO5 value in R is the answer */
182  _ArraySetValue_ ((A+Nsys*indexI[dir]+sys*nvars),nvars,0.0)
183  _ArraySetValue_ ((B+Nsys*indexI[dir]+sys*nvars),nvars,1.0)
184  _ArraySetValue_ ((C+Nsys*indexI[dir]+sys*nvars),nvars,0.0)
185  } else {
 /* interior rows: diagonals are weighted combinations of the nonlinear
  * weights; A and C swap roles when the upwind direction flips */
186  if (upw > 0) {
187  _ArrayAXBY_ ((A+Nsys*indexI[dir]+sys*nvars),(2*one_third) ,w1,(one_third) ,w2,nvars);
188  _ArrayAXBYCZ_ ((B+Nsys*indexI[dir]+sys*nvars),(one_third) ,w1,(2*one_third),w2,(2*one_third),w3,nvars);
189  _ArrayScaleCopy1D_(w3,(one_third),(C+Nsys*indexI[dir]+sys*nvars),nvars);
190  } else {
191  _ArrayAXBY_ ((C+Nsys*indexI[dir]+sys*nvars),(2*one_third) ,w1,(one_third) ,w2,nvars);
192  _ArrayAXBYCZ_ ((B+Nsys*indexI[dir]+sys*nvars),(one_third) ,w1,(2*one_third),w2,(2*one_third),w3,nvars);
193  _ArrayScaleCopy1D_(w3,(one_third),(A+Nsys*indexI[dir]+sys*nvars),nvars);
194  }
195  }
 /* right-hand side: weighted sum of the three candidate reconstructions */
196  _ArrayMultiply3Add1D_ ((R+Nsys*indexI[dir]+sys*nvars),w1,f1,w2,f2,w3,f3,nvars);
197  }
198  }
199 
200 #ifdef serial
201 
202  /* Solve the tridiagonal system */
203  IERR tridiagLU(A,B,C,R,dim[dir]+1,Nsys,lu,NULL); CHECKERR(ierr);
204 
205 #else
206 
207  /* Solve the tridiagonal system */
208  /* all processes except the last will solve without the last interface to avoid overlap */
 /* Parallel strategy: the interface at indexI[dir]==dim[dir] on rank p is the
  * same physical interface as indexI[dir]==0 on rank p+1. Each rank except
  * the last therefore solves only its first dim[dir] rows; the overlap row's
  * value is fetched from the next rank afterwards. */
209  if (mpi->ip[dir] != mpi->iproc[dir]-1) { IERR tridiagLU(A,B,C,R,dim[dir] ,Nsys,lu,&mpi->comm[dir]); CHECKERR(ierr); }
210  else { IERR tridiagLU(A,B,C,R,dim[dir]+1,Nsys,lu,&mpi->comm[dir]); CHECKERR(ierr); }
211 
212  /* Now get the solution to the last interface from the next proc */
 /* Every rank except rank 0 sends its first-row solution (R[0..Nsys-1])
  * downward (tag 214); every rank except the last receives from above into
  * its last row. Requests start as MPI_REQUEST_NULL so the unconditional
  * MPI_Waitall is safe when a rank only sends or only receives. */
213  double *sendbuf = compact->sendbuf;
214  double *recvbuf = compact->recvbuf;
215  MPI_Request req[2] = {MPI_REQUEST_NULL,MPI_REQUEST_NULL};
216  if (mpi->ip[dir]) for (d=0; d<Nsys; d++) sendbuf[d] = R[d];
217  if (mpi->ip[dir] != mpi->iproc[dir]-1) MPI_Irecv(recvbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]+1,214,mpi->comm[dir],&req[0]);
218  if (mpi->ip[dir]) MPI_Isend(sendbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]-1,214,mpi->comm[dir],&req[1]);
219  MPI_Status status_arr[2];
220  MPI_Waitall(2,&req[0],status_arr);
221  if (mpi->ip[dir] != mpi->iproc[dir]-1) for (d=0; d<Nsys; d++) R[d+Nsys*dim[dir]] = recvbuf[d];
222 
223 #endif
224 
225  /* save the solution to fI */
 /* copy each system's solution out of R (interface-major layout) into the
  * interface array fI (point-major layout) */
226 #pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI)
227  for (sys=0; sys < N_outer; sys++) {
228  _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
229  _ArrayCopy1D_(index_outer,indexI,ndims);
230  for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
231  int p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
232  _ArrayCopy1D_((R+sys*nvars+Nsys*indexI[dir]),(fI+nvars*p),nvars);
233  }
234  }
235 
236  return(0);
237 }
void * interp
Definition: hypar.h:362
Header file for TridiagLU.
MPI_Comm * comm
#define _ArraySetValue_(x, size, value)
Definitions for the functions computing the interpolated value of the primitive at the cell interface...
#define _ArrayAXBY_(z, a, x, b, y, size)
#define _ArrayIndexnD_(N, index, imax, i, ghost)
#define _ArrayScaleCopy1D_(x, a, y, size)
int tridiagLU(double *, double *, double *, double *, int, int, void *, void *)
Definition: tridiagLU.c:83
int * dim_local
Definition: hypar.h:37
MPI related function definitions.
double * recvbuf
Structure of variables/parameters needed by the WENO-type scheme.
Structure of variables/parameters needed by the compact schemes.
int ghosts
Definition: hypar.h:52
#define _ArrayIndex1D_(N, imax, i, ghost, index)
#define _ArrayMultiply3Add1D_(x, a, b, c, d, e, f, size)
void * compact
Definition: hypar.h:364
Contains function definitions for common mathematical functions.
int * stride_with_ghosts
Definition: hypar.h:414
#define _ArrayCopy1D_(x, y, size)
int nvars
Definition: hypar.h:29
#define CHECKERR(ierr)
Definition: basic.h:18
#define _ArrayAXBYCZ_(w, a, x, b, y, c, z, size)
int Interp1PrimFifthOrderCRWENO(double *, double *, double *, double *, int, int, void *, void *, int)
5th order CRWENO reconstruction (component-wise) on a uniform grid
Contains structure definition for hypar.
Some basic definitions and macros.
int ndims
Definition: hypar.h:26
Contains macros and function definitions for common array operations.
#define IERR
Definition: basic.h:16
#define _ArrayProduct1D_(x, size, p)
Structure of MPI-related variables.
Structure containing all solver-specific variables and functions.
Definition: hypar.h:23
double * sendbuf
void * lusolver
Definition: hypar.h:368
#define _DECLARE_IERR_
Definition: basic.h:17