HyPar 1.0
Finite-Difference Hyperbolic-Parabolic PDE Solver on Cartesian Grids
tridiagIterJacobi.c
/* tridiagIterJacobi.c: solve tridiagonal systems of equations, in serial
   or distributed across MPI ranks, using point Jacobi iterations */

#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#ifndef serial
#include <mpi.h>
#endif
#include <tridiagLU.h>

/* Solve ns independent tridiagonal systems of equations, each of local size
   n on this process, using point Jacobi iterations. The arrays a, b, and c
   hold the subdiagonal, main-diagonal, and superdiagonal entries; x holds
   the right-hand sides on input and the solutions on output. */
int tridiagIterJacobi(
                       double  *a,  /* subdiagonal entries, size (n*ns) */
                       double  *b,  /* main-diagonal entries, size (n*ns) */
                       double  *c,  /* superdiagonal entries, size (n*ns) */
                       double  *x,  /* right-hand side on input; solution on output */
                       int     n,   /* local size of each system */
                       int     ns,  /* number of systems */
                       void    *r,  /* TridiagLU object with solver parameters */
                       void    *m   /* MPI communicator (may be NULL) */
                     )
{
  TridiagLU *context = (TridiagLU*) r;
  int       iter, d, i, NT;
  double    norm = 0, norm0 = 0, global_norm = 0;

#ifndef serial
  MPI_Comm  *comm = (MPI_Comm*) m;
  int       rank, nproc;

  if (comm) {
    MPI_Comm_size(*comm, &nproc);
    MPI_Comm_rank(*comm, &rank);
  } else {
    rank  = 0;
    nproc = 1;
  }
#endif

  if (!context) {
    fprintf(stderr, "Error in tridiagIterJacobi(): NULL pointer passed for parameters!\n");
    return(-1);
  }

  /* check for zero along the main diagonal (the Jacobi update divides by it) */
  for (i = 0; i < n; i++) {
    for (d = 0; d < ns; d++) {
      if (b[i*ns+d]*b[i*ns+d] < context->atol*context->atol) {
        fprintf(stderr, "Error in tridiagIterJacobi(): Encountered zero on main diagonal!\n");
        return(1);
      }
    }
  }

  double *rhs = (double*) calloc(ns*n, sizeof(double));
  for (i = 0; i < n; i++) {
    for (d = 0; d < ns; d++) {
      rhs[i*ns+d]  = x[i*ns+d];  /* save a copy of the rhs */
      x[i*ns+d]   /= b[i*ns+d];  /* initial guess: x = rhs/diagonal */
    }
  }

  double *recvbufL, *recvbufR, *sendbufL, *sendbufR;
  recvbufL = (double*) calloc(ns, sizeof(double));
  recvbufR = (double*) calloc(ns, sizeof(double));
  sendbufL = (double*) calloc(ns, sizeof(double));
  sendbufR = (double*) calloc(ns, sizeof(double));

  /* total number of points (used to normalize the residual norm) */
#ifdef serial
  if (context->evaluate_norm) NT = n;
  else                        NT = 0;
#else
  if (context->evaluate_norm) MPI_Allreduce(&n, &NT, 1, MPI_INT, MPI_SUM, *comm);
  else                        NT = 0;
#endif

#ifdef serial
  if (context->verbose) printf("\n");
#else
  if (context->verbose && (!rank)) printf("\n");
#endif

  iter = 0;
  while (1) {

    /* evaluate break conditions */
    if (    (iter >= context->maxiter)
        ||  (iter && context->evaluate_norm && (global_norm < context->atol))
        ||  (iter && context->evaluate_norm && (global_norm/norm0 < context->rtol)) ) {
      break;
    }

    /* Communicate the boundary x values between processors */
    for (d = 0; d < ns; d++) recvbufL[d] = recvbufR[d] = 0;
#ifndef serial
    MPI_Request req[4] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL, MPI_REQUEST_NULL, MPI_REQUEST_NULL};
    if (rank)            MPI_Irecv(recvbufL, ns, MPI_DOUBLE, rank-1, 2, *comm, &req[0]);
    if (rank != nproc-1) MPI_Irecv(recvbufR, ns, MPI_DOUBLE, rank+1, 3, *comm, &req[1]);
    for (d = 0; d < ns; d++) { sendbufL[d] = x[d]; sendbufR[d] = x[(n-1)*ns+d]; }
    if (rank)            MPI_Isend(sendbufL, ns, MPI_DOUBLE, rank-1, 3, *comm, &req[2]);
    if (rank != nproc-1) MPI_Isend(sendbufR, ns, MPI_DOUBLE, rank+1, 2, *comm, &req[3]);
#endif
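    /* the non-blocking sends and receives above are posted before the
       interior residual is computed below, so communication overlaps with
       computation; MPI_Waitall is called only once the boundary values
       are actually needed */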

    /* calculate error norm - interior */
    if (context->evaluate_norm) {
      norm = 0;
      for (i = 1; i < n-1; i++) {
        for (d = 0; d < ns; d++) {
          norm += ( (a[i*ns+d]*x[(i-1)*ns+d] + b[i*ns+d]*x[i*ns+d] + c[i*ns+d]*x[(i+1)*ns+d] - rhs[i*ns+d])
                  * (a[i*ns+d]*x[(i-1)*ns+d] + b[i*ns+d]*x[i*ns+d] + c[i*ns+d]*x[(i+1)*ns+d] - rhs[i*ns+d]) );
        }
      }
    }
    /* calculate error norm - boundary */
#ifndef serial
    MPI_Status status_arr[4];
    MPI_Waitall(4, req, status_arr);
#endif
    if (context->evaluate_norm) {
      if (n > 1) {
        for (d = 0; d < ns; d++) {
          norm += ( (a[d]*recvbufL[d] + b[d]*x[d] + c[d]*x[d+ns] - rhs[d])
                  * (a[d]*recvbufL[d] + b[d]*x[d] + c[d]*x[d+ns] - rhs[d]) );
        }
        for (d = 0; d < ns; d++) {
          norm += ( (a[d+ns*(n-1)]*x[d+ns*(n-2)] + b[d+ns*(n-1)]*x[d+ns*(n-1)] + c[d+ns*(n-1)]*recvbufR[d] - rhs[d+ns*(n-1)])
                  * (a[d+ns*(n-1)]*x[d+ns*(n-2)] + b[d+ns*(n-1)]*x[d+ns*(n-1)] + c[d+ns*(n-1)]*recvbufR[d] - rhs[d+ns*(n-1)]) );
        }
      } else {
        for (d = 0; d < ns; d++) {
          norm += ( (a[d]*recvbufL[d] + b[d]*x[d] + c[d]*recvbufR[d] - rhs[d])
                  * (a[d]*recvbufL[d] + b[d]*x[d] + c[d]*recvbufR[d] - rhs[d]) );
        }
      }
      /* sum over all processes */
#ifdef serial
      global_norm = norm;
#else
      MPI_Allreduce(&norm, &global_norm, 1, MPI_DOUBLE, MPI_SUM, *comm);
#endif
      global_norm = sqrt(global_norm/NT);  /* root-mean-square of the residual */
      if (!iter) norm0 = global_norm;
    } else {
      norm = -1.0;
      global_norm = -1.0;
    }

#ifdef serial
    if (context->verbose)
#else
    if (context->verbose && (!rank))
#endif
      printf("\t\titer: %d, norm: %1.16E\n", iter, global_norm);

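    /* Jacobi update: x[i] <- (rhs[i] - a[i]*x[i-1] - c[i]*x[i+1]) / b[i],
       where recvbufL and recvbufR supply the neighboring values of x across
       process boundaries (zero at the ends of the global domain) */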
    /* correct the solution for this iteration */
    if (n > 1) {
      for (d = 0; d < ns; d++) {
        i = 0;   x[i*ns+d] = (rhs[i*ns+d] - a[i*ns+d]*recvbufL[d] - c[i*ns+d]*x[d+ns*(i+1)]) / b[i*ns+d];
        i = n-1; x[i*ns+d] = (rhs[i*ns+d] - a[i*ns+d]*x[d+ns*(i-1)] - c[i*ns+d]*recvbufR[d]) / b[i*ns+d];
      }
      for (i = 1; i < n-1; i++) {
        for (d = 0; d < ns; d++) {
          x[i*ns+d] = (rhs[i*ns+d] - a[i*ns+d]*x[d+ns*(i-1)] - c[i*ns+d]*x[d+ns*(i+1)]) / b[i*ns+d];
        }
      }
    } else {
      for (d = 0; d < ns; d++) x[d] = (rhs[d] - a[d]*recvbufL[d] - c[d]*recvbufR[d]) / b[d];
    }

    /* finished with this iteration */
    iter++;
  }

  /* save convergence information */
  context->exitnorm = (context->evaluate_norm ? global_norm : -1.0);
  context->exititer = iter;

  free(rhs);
  free(sendbufL);
  free(sendbufR);
  free(recvbufL);
  free(recvbufR);

  return(0);
}
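Below is a minimal usage sketch, not part of the library: it solves one diagonally dominant tridiagonal system in serial, assuming the code is compiled with -Dserial (so the MPI argument may be NULL) and that tridiagLU.h declares both the TridiagLU struct and this function. The context fields set here (maxiter, atol, rtol, evaluate_norm, verbose) are the ones the function reads above.

#include <stdio.h>
#include <tridiagLU.h>

int main(void)
{
  int    n = 8, ns = 1, i;
  double a[8], b[8], c[8], x[8];

  /* set up a diagonally dominant system so the Jacobi iterations converge */
  for (i = 0; i < n; i++) {
    a[i] = (i == 0   ? 0.0 : -1.0);  /* subdiagonal */
    b[i] = 4.0;                      /* main diagonal */
    c[i] = (i == n-1 ? 0.0 : -1.0);  /* superdiagonal */
    x[i] = 1.0;                      /* right-hand side */
  }

  TridiagLU context;
  context.maxiter       = 1000;
  context.atol          = 1e-12;
  context.rtol          = 1e-10;
  context.evaluate_norm = 1;
  context.verbose       = 0;

  /* serial build: the MPI-communicator argument is unused, so pass NULL */
  int ierr = tridiagIterJacobi(a, b, c, x, n, ns, &context, NULL);
  printf("status %d, iterations %d, exit norm %1.6e\n",
         ierr, context.exititer, context.exitnorm);
  return(ierr);
}

In an MPI build, each rank would instead pass its local block of each system and a pointer to a valid MPI_Comm as the last argument.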