Actual source code: plexland.c

petsc-3.15.0 2021-03-30
  1: #include <petsc/private/dmpleximpl.h>
  2: #include <petsclandau.h>
  3: #include <petscts.h>
  4: #include <petscdmforest.h>

  6: /* Landau collision operator */
  7: #define PETSC_THREAD_SYNC
  8: #define PETSC_DEVICE_FUNC_DECL static
  9: #include "land_tensors.h"

 11: /* vector padding not supported */
 12: #define LANDAU_VL  1

 14: int LandauGetIPDataSize(const LandauIPData *const d) {
 15:   return d->nip_*(1 + d->dim_ + d->ns_); /* assumes Nq == Nb */
 16: }

 18: static PetscErrorCode LandauPointDataCreate(LandauIPData *IPData, PetscInt dim, PetscInt nip, PetscInt Ns)
 19: {
 20:   PetscErrorCode  ierr;
 21:   PetscInt        sz, nip_pad = nip; /* LANDAU_VL*(nip/LANDAU_VL + !!(nip%LANDAU_VL)); */
 22:   LandauIPReal    *pdata;
 24:   IPData->dim_ = dim;
 25:   IPData->nip_ = nip_pad;
 26:   IPData->ns_  = Ns;
 27:   sz = LandauGetIPDataSize(IPData);
 28:   PetscMalloc(sizeof(LandauIPReal)*sz,&pdata);
 29:   /* pack data */
 30:   IPData->w    = pdata + 0; /* w */
 31:   IPData->x    = pdata + 1*nip_pad;
 32:   IPData->y    = pdata + 2*nip_pad;
 33:   IPData->z    = pdata + 3*nip_pad;
 34:   IPData->coefs= pdata + (dim+1)*nip_pad;
 35:   return(0);
 36: }
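/* Editorial sketch (not part of the original source): with padding disabled
   (nip_pad == nip), the single allocation above is laid out as

     [ w : nip | x : nip | y : nip | z : nip (3D; aliases coefs in 2D) | coefs : ns*nip ]

   so IPData->coefs starts at offset (dim+1)*nip_pad and the block length is
   exactly LandauGetIPDataSize() = nip_*(1 + dim_ + ns_). Because w is the base
   pointer of the allocation, LandauPointDataDestroy() below frees the whole
   block with a single PetscFree(IPData->w). */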
 37: static PetscErrorCode LandauGPUDataDestroy(void *ptr)
 38: {
 39:   P4estVertexMaps *maps = (P4estVertexMaps *)ptr;
 40:   PetscErrorCode  ierr;
 42:   if (maps->deviceType != LANDAU_CPU) {
 43: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
 44:     if (maps->deviceType == LANDAU_KOKKOS) {
 45:       LandauKokkosDestroyMatMaps(maps); // implies Kokkos does it
 46:     } // else could be CUDA
 47: #elif defined(PETSC_HAVE_CUDA)
 48:     if (maps->deviceType == LANDAU_CUDA){
 49:       LandauCUDADestroyMatMaps(maps);
 50:     } else SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps->deviceType %D ?????",maps->deviceType);
 51: #endif
 52:   }
 53:   PetscFree(maps->c_maps);
 54:   PetscFree(maps->gIdx);
 55:   PetscFree(maps);
 56:   return(0);
 57: }
 58: static PetscErrorCode LandauPointDataDestroy(LandauIPData *IPData)
 59: {
 60:   PetscErrorCode   ierr;
 62:   PetscFree(IPData->w);
 63:   return(0);
 64: }
 65: /* ------------------------------------------------------------------- */
 66: /*
 67:  LandauFormJacobian_Internal - Evaluates the Landau collision Jacobian matrix, or a shifted mass matrix when shift != 0.

 69:  Input Parameters:
 70:  .  a_X - input vector
 71:  .  a_ctx - optional user-defined context
 72:  .  dim - dimension

 74:  Output Parameters:
 75:  .  JacP - Jacobian matrix filled, not created
 76:  */
 77: static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx)
 78: {
 79:   LandauCtx         *ctx = (LandauCtx*)a_ctx;
 80:   PetscErrorCode    ierr;
 81:   PetscInt          cStart, cEnd, elemMatSize;
 82:   DM                plex = NULL;
 83:   PetscDS           prob;
 84:   PetscSection      section,globsection;
 85:   PetscInt          numCells,totDim,ej,Nq,*Nbf,*Ncf,Nb,Ncx,Nf,d,f,fieldA,qj;
 86:   PetscQuadrature   quad;
 87:   PetscTabulation   *Tf;
 88:   PetscReal         nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
 89:   const PetscReal   *quadWeights;
 90:   PetscReal         *invJ,*invJ_a=NULL,*mass_w=NULL;
 91:   PetscReal         invMass[LANDAU_MAX_SPECIES],Eq_m[LANDAU_MAX_SPECIES],m_0=ctx->m_0; /* normalize mass -- not needed! */
 92:   PetscLogDouble    flops;
 93:   Vec               locX;
 94:   LandauIPData      IPData;
 95:   PetscContainer    container;
 96:   P4estVertexMaps   *maps=NULL;


103:   /* check for matrix container for GPU assembly */
104:   PetscLogEventBegin(ctx->events[10],0,0,0,0);
105:   PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container);
106:   if (container /* && ctx->deviceType != LANDAU_CPU */) {
107:     if (!ctx->gpu_assembly) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"GPU matrix container but no GPU assembly");
108:     PetscContainerGetPointer(container, (void **) &maps);
109:     if (!maps) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"empty GPU matrix container");
110:   }
111:   DMConvert(ctx->dmv, DMPLEX, &plex);
112:   DMCreateLocalVector(plex, &locX);
113:   VecZeroEntries(locX); /* zero the local vector: BCs are zero, so they need not be set */
114:   DMGlobalToLocalBegin(plex, a_X, INSERT_VALUES, locX);
115:   DMGlobalToLocalEnd  (plex, a_X, INSERT_VALUES, locX);
116:   DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd);
117:   DMGetLocalSection(plex, &section);
118:   DMGetGlobalSection(plex, &globsection);
119:   DMGetDS(plex, &prob);
120:   PetscDSGetTabulation(prob, &Tf); // Bf, &Df
121:   PetscDSGetDimensions(prob, &Nbf); Nb = Nbf[0]; /* number of basis functions per field */
122:   PetscSectionGetNumFields(section, &Nf);         if (Nf!=ctx->num_species) SETERRQ1(ctx->comm, PETSC_ERR_PLIB, "Nf %D != S",Nf);
123:   PetscDSGetComponents(prob, &Ncf); Ncx = Ncf[0]; if (Ncx!=1) SETERRQ1(ctx->comm, PETSC_ERR_PLIB, "Nc %D != 1",Ncx);
124:   if (shift==0.0) {
125:     for (fieldA=0;fieldA<Nf;fieldA++) {
126:       invMass[fieldA] = m_0/ctx->masses[fieldA];
127:       Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
128:       if (dim==2) Eq_m[fieldA] *=  2 * PETSC_PI; /* add the 2pi term that is not in Landau */
129:       nu_alpha[fieldA] = PetscSqr(ctx->charges[fieldA]/m_0)*m_0/ctx->masses[fieldA];
130:       nu_beta[fieldA] = PetscSqr(ctx->charges[fieldA]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
131:     }
132:   }
133:   PetscDSGetTotalDimension(prob, &totDim);
134:   numCells = cEnd - cStart;
135:   PetscFEGetQuadrature(ctx->fe[0], &quad);
136:   PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, &quadWeights);
137:   if (Nb!=Nq) SETERRQ4(ctx->comm, PETSC_ERR_PLIB, "Nb!=Nq %D %D over integration or simplices? Tf[0]->Nb=%D dim=%D",Nb,Nq,Tf[0]->Nb,dim);
138:   if (Nq >LANDAU_MAX_NQ) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %D > LANDAU_MAX_NQ (%D)",Nq,LANDAU_MAX_NQ);
139:   if (LANDAU_DIM != dim) SETERRQ2(ctx->comm, PETSC_ERR_PLIB, "dim %D != LANDAU_DIM %d",dim,LANDAU_DIM);
140:   if (shift==0.0) {
141:     MatZeroEntries(JacP);
142:     flops = (PetscLogDouble)numCells*(PetscLogDouble)Nq*(PetscLogDouble)(5*dim*dim*Nf*Nf + 165);
143:   } else {
144:   flops = (PetscLogDouble)numCells*(PetscLogDouble)Nq*(PetscLogDouble)(5*dim*dim*Nf*Nf);
145:   }
146:   elemMatSize = totDim*totDim;
147:   PetscLogEventEnd(ctx->events[10],0,0,0,0);
148:   {
149:     static int         cc = 0;
150:     /* collect f data */
151:     if (ctx->verbose > 1 || (ctx->verbose > 0 && cc++ == 0)) {
152:       PetscInt N,Nloc;
153:       MatGetSize(JacP,&N,NULL);
154:       VecGetSize(locX,&Nloc);
155:       PetscPrintf(ctx->comm,"[%D]%s: %D IPs, %D cells, totDim=%D, Nb=%D, Nq=%D, elemMatSize=%D, dim=%D, Tab: Nb=%D Nf=%D Np=%D cdim=%D N=%D N+hang=%D, shift=%g\n",
156:                          0,"FormLandau",Nq*numCells,numCells, totDim, Nb, Nq, elemMatSize, dim, Tf[0]->Nb, Nf, Tf[0]->Np, Tf[0]->cdim, N, Nloc, shift);
157:     }
158:     if (shift==0.0) {
159:       LandauPointDataCreate(&IPData, dim, Nq*numCells, Nf);
160:       PetscMalloc1(numCells*Nq*dim*dim,&invJ_a);
161:       PetscLogEventBegin(ctx->events[7],0,0,0,0);
162:     } else { // mass
163:       PetscMalloc1(numCells*Nq,&mass_w);
164:       IPData.w = NULL;
165:       PetscLogEventBegin(ctx->events[1],0,0,0,0);
166:     }
167:     /* cache geometry and x, f and df/dx at IPs */
168:     for (ej = 0 ; ej < numCells; ++ej) {
169:       PetscReal    vj[LANDAU_MAX_NQ*LANDAU_DIM],detJj[LANDAU_MAX_NQ], Jdummy[LANDAU_MAX_NQ*LANDAU_DIM*LANDAU_DIM];
170:       PetscScalar *coef = NULL;
171:       invJ = invJ_a ? invJ_a + ej * Nq*dim*dim : NULL;
172:       DMPlexComputeCellGeometryFEM(plex, cStart+ej, quad, vj, Jdummy, invJ, detJj);
173:       if (shift!=0.0) { // mass
174:         for (qj = 0; qj < Nq; ++qj) {
175:           PetscInt         gidx = (ej*Nq + qj);
176:           mass_w[gidx] = detJj[qj] * quadWeights[qj];
177:           if (dim==2) mass_w[gidx] *=  2.*PETSC_PI*vj[qj * dim + 0]; /* cylindrical coordinate, 2pi included here */
178:         }
179:       } else {
180:         DMPlexVecGetClosure(plex, section, locX, cStart+ej, NULL, &coef);
181:         PetscMemcpy(&IPData.coefs[ej*Nb*Nf],coef,Nb*Nf*sizeof(PetscScalar)); /* change if LandauIPReal != PetscScalar */
182:         /* create point data for cell i for Landau tensor: x, f(x), grad f(x) */
183:         for (qj = 0; qj < Nq; ++qj) {
184:           PetscInt         gidx = (ej*Nq + qj);
185:           IPData.x[gidx] = vj[qj * dim + 0]; /* coordinate */
186:           IPData.y[gidx] = vj[qj * dim + 1];
187:           if (dim==3) IPData.z[gidx] = vj[qj * dim + 2];
188:           IPData.w[gidx] = detJj[qj] * quadWeights[qj];
189:           if (dim==2) IPData.w[gidx] *= IPData.x[gidx];  /* cylindrical coordinate, w/o 2pi */
190:         } /* q */
191:         DMPlexVecRestoreClosure(plex, section, locX, cStart+ej, NULL, &coef);
192:       }
193:     } /* ej */
194:     if (shift==0.0) {
195:       PetscLogEventEnd(ctx->events[7],0,0,0,0);
196:     } else { // mass
197:       PetscLogEventEnd(ctx->events[1],0,0,0,0);
198:     }
199:   }
200:   DMRestoreLocalVector(plex, &locX);

202:   /* build the operator: device kernels if available, otherwise the CPU path below */
203:   if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
204:     if (ctx->deviceType == LANDAU_CUDA) {
205: #if defined(PETSC_HAVE_CUDA)
206:       LandauCUDAJacobian(plex,Nq,nu_alpha,nu_beta,invMass,Eq_m,&IPData,invJ_a,mass_w,shift,ctx->events,JacP);
207: #else
208:       SETERRQ1(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
209: #endif
210:     } else if (ctx->deviceType == LANDAU_KOKKOS) {
211: #if defined(PETSC_HAVE_KOKKOS)
212:       LandauKokkosJacobian(plex,Nq,nu_alpha,nu_beta,invMass,Eq_m,&IPData,invJ_a,ctx->subThreadBlockSize,mass_w,shift,ctx->events,JacP);
213: #else
214:       SETERRQ1(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
215: #endif
216:     }
217:   } else { /* CPU version */
218:     PetscInt                ei, qi;
219:     PetscScalar             *elemMat;
220:     PetscReal               *ff, *dudx, *dudy, *dudz;
221:     const PetscReal * const BB = Tf[0]->T[0], * const DD = Tf[0]->T[1];
222:     if (shift!=0.0) { // mass
223:       PetscMalloc1(elemMatSize, &elemMat);
224:     } else {
225:       PetscLogEventBegin(ctx->events[8],0,0,0,0);
226:       PetscMalloc5(elemMatSize, &elemMat, IPData.nip_*Nf, &ff,IPData.nip_*Nf, &dudx, IPData.nip_*Nf, &dudy, dim==3 ? IPData.nip_*Nf : 0, &dudz);
227:       /* compute f and df */
228:       for (ei = cStart, invJ = invJ_a; ei < cEnd; ++ei, invJ += Nq*dim*dim) {
229:         LandauIPReal  *coef = &IPData.coefs[ei*Nb*Nf];
230:         PetscScalar   u_x[LANDAU_MAX_SPECIES][LANDAU_DIM];
231:         /* get f and df */
232:         for (qi = 0; qi < Nq; ++qi) {
233:           const PetscReal  *Bq = &BB[qi*Nb];
234:           const PetscReal  *Dq = &DD[qi*Nb*dim];
235:           const PetscInt   gidx = ei*Nq + qi;
236:           /* get f & df */
237:           for (f = 0; f < Nf; ++f) {
238:             PetscInt    b, e;
239:             PetscScalar refSpaceDer[LANDAU_DIM];
240:             ff[gidx + f*IPData.nip_] = 0.0;
241:             for (d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
242:             for (b = 0; b < Nb; ++b) {
243:               const PetscInt    cidx = b;
244:               ff[gidx + f*IPData.nip_] += Bq[cidx]*coef[f*Nb+cidx];
245:               for (d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx*dim+d]*coef[f*Nb+cidx];
246:             }
247:             for (d = 0; d < dim; ++d) {
248:               for (e = 0, u_x[f][d] = 0.0; e < dim; ++e) {
249:                 u_x[f][d] += invJ[qi * dim * dim + e*dim+d]*refSpaceDer[e];
250:               }
251:             }
252:           }
253:           for (f=0;f<Nf;f++) {
254:             dudx[gidx + f*IPData.nip_] = PetscRealPart(u_x[f][0]);
255:             dudy[gidx + f*IPData.nip_] = PetscRealPart(u_x[f][1]);
256: #if LANDAU_DIM==3
257:             dudz[gidx + f*IPData.nip_] = PetscRealPart(u_x[f][2]);
258: #endif
259:           }
260:         }
261:       }
262:       PetscLogEventEnd(ctx->events[8],0,0,0,0);
263:     }
264:     for (ej = cStart; ej < cEnd; ++ej) {
265:       PetscLogEventBegin(ctx->events[3],0,0,0,0);
266:       PetscMemzero(elemMat, totDim *totDim * sizeof(PetscScalar));
267:       PetscLogEventEnd(ctx->events[3],0,0,0,0);
268:       PetscLogEventBegin(ctx->events[4],0,0,0,0);
269:       PetscLogFlops((PetscLogDouble)Nq*flops);
270:       invJ = invJ_a ? invJ_a + ej * Nq*dim*dim : NULL;
271:       for (qj = 0; qj < Nq; ++qj) {
272:         const PetscReal * const BB = Tf[0]->T[0], * const DD = Tf[0]->T[1];
273:         PetscReal               g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM];
274:         PetscInt                d,d2,dp,d3,ipidx,fieldA;
275:         const PetscInt          jpidx = Nq*(ej-cStart) + qj;
276:         if (shift==0.0) {
277:           const PetscReal * const invJj = &invJ[qj*dim*dim];
278:           PetscReal               gg2[LANDAU_MAX_SPECIES][LANDAU_DIM],gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
279:           const PetscReal         vj[3] = {IPData.x[jpidx], IPData.y[jpidx], IPData.z ? IPData.z[jpidx] : 0}, wj = IPData.w[jpidx];

281:           // create g2 & g3
282:           for (d=0;d<dim;d++) { // clear accumulation data D & K
283:             gg2_temp[d] = 0;
284:             for (d2=0;d2<dim;d2++) gg3_temp[d][d2] = 0;
285:           }
286:           for (ipidx = 0; ipidx < IPData.nip_; ipidx++) {
287:             const PetscReal wi = IPData.w[ipidx], x = IPData.x[ipidx], y = IPData.y[ipidx];
288:             PetscReal       temp1[3] = {0, 0, 0}, temp2 = 0;
289: #if LANDAU_DIM==2
290:             PetscReal       Ud[2][2], Uk[2][2];
291:             LandauTensor2D(vj, x, y, Ud, Uk, (ipidx==jpidx) ? 0. : 1.);
292: #else
293:             PetscReal U[3][3], z = IPData.z[ipidx];
294:             LandauTensor3D(vj, x, y, z, U, (ipidx==jpidx) ? 0. : 1.);
295: #endif
296:             for (fieldA = 0; fieldA < Nf; ++fieldA) {
297:               temp1[0] += dudx[ipidx + fieldA*IPData.nip_]*nu_beta[fieldA]*invMass[fieldA];
298:               temp1[1] += dudy[ipidx + fieldA*IPData.nip_]*nu_beta[fieldA]*invMass[fieldA];
299: #if LANDAU_DIM==3
300:               temp1[2] += dudz[ipidx + fieldA*IPData.nip_]*nu_beta[fieldA]*invMass[fieldA];
301: #endif
302:               temp2    += ff[ipidx + fieldA*IPData.nip_]*nu_beta[fieldA];
303:             }
304:             temp1[0] *= wi;
305:             temp1[1] *= wi;
306: #if LANDAU_DIM==3
307:             temp1[2] *= wi;
308: #endif
309:             temp2    *= wi;
310: #if LANDAU_DIM==2
311:             for (d2 = 0; d2 < 2; d2++) {
312:               for (d3 = 0; d3 < 2; ++d3) {
313:                 /* K = U * grad(f): g2=e: i,A */
314:                 gg2_temp[d2] += Uk[d2][d3]*temp1[d3];
315:                 /* D = -U * (I \kron (fx)): g3=f: i,j,A */
316:                 gg3_temp[d2][d3] += Ud[d2][d3]*temp2;
317:               }
318:             }
319: #else
320:             for (d2 = 0; d2 < 3; ++d2) {
321:               for (d3 = 0; d3 < 3; ++d3) {
322:                 /* K = U * grad(f): g2 = e: i,A */
323:                 gg2_temp[d2] += U[d2][d3]*temp1[d3];
324:                 /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
325:                 gg3_temp[d2][d3] += U[d2][d3]*temp2;
326:               }
327:             }
328: #endif
329:           } /* IPs */
330:           //if (ej==0) printf("\t:%d.%d) temp gg3=%e %e %e %e\n",ej,qj,gg3_temp[0][0],gg3_temp[1][0],gg3_temp[0][1],gg3_temp[1][1]);
331:           // add alpha and put in gg2/3
332:           for (fieldA = 0; fieldA < Nf; ++fieldA) {
333:             for (d2 = 0; d2 < dim; d2++) {
334:               gg2[fieldA][d2] = gg2_temp[d2]*nu_alpha[fieldA];
335:               for (d3 = 0; d3 < dim; d3++) {
336:                 gg3[fieldA][d2][d3] = -gg3_temp[d2][d3]*nu_alpha[fieldA]*invMass[fieldA];
337:               }
338:             }
339:           }
340:           /* add electric field term once per IP */
341:           for (fieldA = 0; fieldA < Nf; ++fieldA) {
342:             gg2[fieldA][dim-1] += Eq_m[fieldA];
343:           }
344:           /* Jacobian transform - g2, g3 */
345:           for (fieldA = 0; fieldA < Nf; ++fieldA) {
346:             for (d = 0; d < dim; ++d) {
347:               g2[fieldA][d] = 0.0;
348:               for (d2 = 0; d2 < dim; ++d2) {
349:                 g2[fieldA][d] += invJj[d*dim+d2]*gg2[fieldA][d2];
350:                 g3[fieldA][d][d2] = 0.0;
351:                 for (d3 = 0; d3 < dim; ++d3) {
352:                   for (dp = 0; dp < dim; ++dp) {
353:                     g3[fieldA][d][d2] += invJj[d*dim + d3]*gg3[fieldA][d3][dp]*invJj[d2*dim + dp];
354:                   }
355:                 }
356:                 g3[fieldA][d][d2] *= wj;
357:               }
358:               g2[fieldA][d] *= wj;
359:             }
360:           }
361:         } else { // mass
362:           /* Jacobian transform - g0 */
363:           for (fieldA = 0; fieldA < Nf; ++fieldA) {
364:             g0[fieldA] = mass_w[jpidx] * shift; // move this to below and remove g0
365:           }
366:         }
367:         /* FE matrix construction */
368:         {
369:           PetscInt  fieldA,d,f,d2,g;
370:           const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim];
371:           /* assemble - on the diagonal (I,I) */
372:           for (fieldA = 0; fieldA < Nf ; fieldA++) {
373:             for (f = 0; f < Nb ; f++) {
374:               const PetscInt i = fieldA*Nb + f; /* Element matrix row */
375:               for (g = 0; g < Nb; ++g) {
376:                 const PetscInt j    = fieldA*Nb + g; /* Element matrix column */
377:                 const PetscInt fOff = i*totDim + j;
378:                 if (shift==0.0) {
379:                   for (d = 0; d < dim; ++d) {
380:                     elemMat[fOff] += DIq[f*dim+d]*g2[fieldA][d]*BJq[g];
381:                     for (d2 = 0; d2 < dim; ++d2) {
382:                       elemMat[fOff] += DIq[f*dim + d]*g3[fieldA][d][d2]*DIq[g*dim + d2];
383:                     }
384:                   }
385:                 } else { // mass
386:                   elemMat[fOff] += BJq[f]*g0[fieldA]*BJq[g];
387:                 }
388:               }
389:             }
390:           }
391:         }
392:       } /* qj loop */
393:       PetscLogEventEnd(ctx->events[4],0,0,0,0);
394:       /* assemble matrix */
395:       PetscLogEventBegin(ctx->events[6],0,0,0,0);
396:       if (!maps) {
397:         DMPlexMatSetClosure(plex, section, globsection, JacP, ej, elemMat, ADD_VALUES);
398:       } else {  // GPU-like assembly, run on the CPU for debugging
399:         PetscInt      fieldA,idx,q,f,g,d,nr,nc,rows0[LANDAU_MAX_Q_FACE],cols0[LANDAU_MAX_Q_FACE],rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE];
400:         PetscScalar   vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE],row_scale[LANDAU_MAX_Q_FACE],col_scale[LANDAU_MAX_Q_FACE];
401:         /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
402:         for (fieldA = 0; fieldA < Nf ; fieldA++) {
403:           LandauIdx *const Idxs = &maps->gIdx[ej-cStart][fieldA][0];
404:           for (f = 0; f < Nb ; f++) {
405:             idx = Idxs[f];
406:             if (idx >= 0) {
407:               nr = 1;
408:               rows0[0] = idx;
409:               row_scale[0] = 1.;
410:             } else {
411:               idx = -idx - 1;
412:               nr = maps->num_face;
413:               for (q = 0; q < maps->num_face; q++) {
414:                 rows0[q]     = maps->c_maps[idx][q].gid;
415:                 row_scale[q] = maps->c_maps[idx][q].scale;
416:               }
417:             }
418:             for (g = 0; g < Nb; ++g) {
419:               idx = Idxs[g];
420:               if (idx >= 0) {
421:                 nc = 1;
422:                 cols0[0] = idx;
423:                 col_scale[0] = 1.;
424:               } else {
425:                 idx = -idx - 1;
426:                 nc = maps->num_face;
427:                 for (q = 0; q < maps->num_face; q++) {
428:                   cols0[q]     = maps->c_maps[idx][q].gid;
429:                   col_scale[q] = maps->c_maps[idx][q].scale;
430:                 }
431:               }
432:               const PetscInt    i = fieldA*Nb + f; /* Element matrix row */
433:               const PetscInt    j = fieldA*Nb + g; /* Element matrix column */
434:               const PetscScalar Aij = elemMat[i*totDim + j];
435:               for (q = 0; q < nr; q++) rows[q] = rows0[q];
436:               for (q = 0; q < nc; q++) cols[q] = cols0[q];
437:               for (q = 0; q < nr; q++) {
438:                 for (d = 0; d < nc; d++) {
439:                   vals[q*nc + d] = row_scale[q]*col_scale[d]*Aij;
440:                 }
441:               }
442:               MatSetValues(JacP,nr,rows,nc,cols,vals,ADD_VALUES);
443:             }
444:           }
445:         }
446:       }
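      /* Editorial note restating the index encoding used above: Idxs[f] >= 0 is a
         plain global index with unit scale; Idxs[f] < 0 marks a constrained
         (hanging) dof, and k = -Idxs[f]-1 selects maps->c_maps[k], whose num_face
         (gid, scale) pairs distribute the contribution Aij across the owning
         vertices via row_scale/col_scale. */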
447:       if (ej==-3) { /* disabled debug dump; set to a valid cell index to print its element matrix */
448:         PetscErrorCode    ierr2;
449:         ierr2 = PetscPrintf(ctx->comm,"CPU Element matrix\n");CHKERRQ(ierr2);
450:         for (d = 0; d < totDim; ++d){
451:           for (f = 0; f < totDim; ++f) {ierr2 = PetscPrintf(ctx->comm," %12.5e",  PetscRealPart(elemMat[d*totDim + f]));CHKERRQ(ierr2);}
452:           ierr2 = PetscPrintf(ctx->comm,"\n");CHKERRQ(ierr2);
453:         }
454:         exit(12);
455:       }
456:       PetscLogEventEnd(ctx->events[6],0,0,0,0);
457:     } /* ej cells loop, not cuda */
458:     if (shift!=0.0) { // mass
459:       PetscFree(elemMat);
460:     } else {
461:       PetscFree5(elemMat, ff, dudx, dudy, dudz);
462:     }
463:   } /* CPU version */
464:   /* assemble matrix */
465:   MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY);
466:   MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY);
467: #define MAP_BF_SIZE (128*LANDAU_DIM*LANDAU_MAX_Q_FACE*LANDAU_MAX_SPECIES)
468:   if (ctx->gpu_assembly && !container) {
469:     PetscScalar             elemMatrix[LANDAU_MAX_NQ*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_MAX_SPECIES], *elMat;
470:     pointInterpolationP4est pointMaps[MAP_BF_SIZE][LANDAU_MAX_Q_FACE];
471:     PetscInt                q,eidx,fieldA;
472:     MatType                 type;
473:     PetscInfo1(JacP, "Make GPU maps %D\n",1);
474:     MatGetType(JacP,&type);
475:     PetscLogEventBegin(ctx->events[2],0,0,0,0);
476:     PetscMalloc(sizeof(P4estVertexMaps), &maps);
477:     PetscContainerCreate(PETSC_COMM_SELF, &container);
478:     PetscContainerSetPointer(container, (void *)maps);
479:     PetscContainerSetUserDestroy(container, LandauGPUDataDestroy);
480:     PetscObjectCompose((PetscObject) JacP, "assembly_maps", (PetscObject) container);
481:     PetscContainerDestroy(&container);
482:     // make maps
483:     maps->data = NULL;
484:     maps->num_elements = numCells;
485:     maps->num_face = (PetscInt)(pow(Nq,1./((double)dim))+.001); // Q
486:     maps->num_face = (PetscInt)(pow(maps->num_face,(double)(dim-1))+.001); // Q^2
487:     maps->num_reduced = 0;
488:     maps->deviceType = ctx->deviceType;
489:     // count reduced and get
490:     PetscMalloc(maps->num_elements * sizeof *maps->gIdx, &maps->gIdx);
491:     for (fieldA=0;fieldA<Nf;fieldA++) {
492:       for (ej = cStart, eidx = 0 ; ej < cEnd; ++ej, ++eidx) {
493:         for (q = 0; q < Nb; ++q) {
494:           PetscInt    numindices,*indices;
495:           PetscScalar *valuesOrig = elMat = elemMatrix;
496:           PetscMemzero(elMat, totDim*totDim*sizeof(PetscScalar));
497:           elMat[ (fieldA*Nb + q)*totDim + fieldA*Nb + q] = 1;
498:           DMPlexGetClosureIndices(plex, section, globsection, ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);
499:           for (f = 0 ; f < numindices ; ++f) { // look for a non-zero on the diagonal
500:             if (PetscAbs(PetscRealPart(elMat[f*numindices + f])) > PETSC_MACHINE_EPSILON) {
501:               // found it
502:               if (PetscAbs(PetscRealPart(elMat[f*numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) {
503:                 maps->gIdx[eidx][fieldA][q] = (LandauIdx)indices[f]; // normal vertex 1.0
504:               } else { // found a constraint
505:                 int       jj = 0;
506:                 PetscReal sum = 0;
507:                 const PetscInt ff = f;
508:                 maps->gIdx[eidx][fieldA][q] = -maps->num_reduced - 1; // gid = -(idx+1): idx = -gid - 1
509:                 do {  // constraints are contiguous in Plex - exploit that here
510:                   int ii;
511:                   for (ii = 0, pointMaps[maps->num_reduced][jj].scale = 0; ii < maps->num_face; ii++) { // DMPlex puts them all together
512:                     if (ff + ii < numindices) {
513:                       pointMaps[maps->num_reduced][jj].scale += PetscRealPart(elMat[f*numindices + ff + ii]);
514:                     }
515:                   }
516:                   sum += pointMaps[maps->num_reduced][jj].scale;
517:                   if (pointMaps[maps->num_reduced][jj].scale == 0) pointMaps[maps->num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps -- all contiguous???
518:                   else                                             pointMaps[maps->num_reduced][jj].gid = indices[f];
519:                 } while (++jj < maps->num_face && ++f < numindices); // jj is incremented if we hit the end
520:                 while (jj++ < maps->num_face) {
521:                   pointMaps[maps->num_reduced][jj].scale = 0;
522:                   pointMaps[maps->num_reduced][jj].gid = -1;
523:                 }
524:                 if (PetscAbs(sum-1.0)>PETSC_MACHINE_EPSILON*2.0) { // debug
525:                   int       d,f;
526:                   PetscReal tmp = 0;
527:                   PetscPrintf(PETSC_COMM_SELF,"\t\t%D.%D.%D) ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%D)\n",eidx,q,fieldA,tmp,LANDAU_MAX_Q_FACE,maps->num_face);
528:                   for (d = 0, tmp = 0; d < numindices; ++d){
529:                     if (tmp!=0 && PetscAbs(tmp-1.0)>2*PETSC_MACHINE_EPSILON) PetscPrintf(PETSC_COMM_WORLD,"%3D) %3D: ",d,indices[d]);
530:                     for (f = 0; f < numindices; ++f) {
531:                       tmp += PetscRealPart(elMat[d*numindices + f]);
532:                     }
533:                     if (tmp!=0) PetscPrintf(ctx->comm," | %22.16e\n",tmp);
534:                   }
535:                 }
536:                 maps->num_reduced++;
537:                 if (maps->num_reduced>=MAP_BF_SIZE) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps->num_reduced %d > %d",maps->num_reduced,MAP_BF_SIZE);
538:               }
539:               break;
540:             }
541:           }
542:           // cleanup
543:           DMPlexRestoreClosureIndices(plex, section, globsection, ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);
544:           if (elMat != valuesOrig) {DMRestoreWorkArray(plex, numindices*numindices, MPIU_SCALAR, &elMat);}
545:         }
546:       }
547:     }
548:     // allocate and copy the point data maps (c_maps; gIdx[eidx][field][q] was built above) -- for the CPU version of this code, for debugging
549:     PetscMalloc(maps->num_reduced * sizeof *maps->c_maps, &maps->c_maps);
550:     for (ej = 0; ej < maps->num_reduced; ++ej) {
551:       for (q = 0; q < maps->num_face; ++q) {
552:         maps->c_maps[ej][q].scale = pointMaps[ej][q].scale;
553:         maps->c_maps[ej][q].gid = pointMaps[ej][q].gid;
554:       }
555:     }
556: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
557:     if (ctx->deviceType == LANDAU_KOKKOS) {
558:       LandauKokkosCreateMatMaps(maps, pointMaps,Nf,Nq); // implies Kokkos does it
559:     } // else could be CUDA
560: #endif
561: #if defined(PETSC_HAVE_CUDA)
562:     if (ctx->deviceType == LANDAU_CUDA){
563:       LandauCUDACreateMatMaps(maps, pointMaps,Nf,Nq);
564:     }
565: #endif
566:     PetscLogEventEnd(ctx->events[2],0,0,0,0);
567:   }
568:   /* clean up */
569:   DMDestroy(&plex);
570:   if (shift==0.0) {
571:     LandauPointDataDestroy(&IPData);
572:     PetscFree(invJ_a);
573:   } else {
574:     PetscFree(mass_w);
575:   }
576:   return(0);
577: }
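/* Editorial note: LandauFormJacobian_Internal above is used in two modes selected
   by 'shift': shift == 0 assembles the Landau collision Jacobian from the cached
   point data (IPData), while shift != 0 assembles shift times a quadrature-weighted
   mass matrix using only mass_w. */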

579: #if defined(LANDAU_ADD_BCS)
580: static void zero_bc(PetscInt dim, PetscInt Nf, PetscInt NfAux,
581:                     const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
582:                     const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
583:                     PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar uexact[])
584: {
585:   uexact[0] = 0;
586: }
587: #endif

589: #define MATVEC2(__a,__x,__p) {int i,j; for (i=0; i<2; i++) {__p[i] = 0; for (j=0; j<2; j++) __p[i] += __a[i][j]*__x[j]; }}
590: static void CircleInflate(PetscReal r1, PetscReal r2, PetscReal r0, PetscInt num_sections, PetscReal x, PetscReal y,
591:                           PetscReal *outX, PetscReal *outY)
592: {
593:   PetscReal rr = PetscSqrtReal(x*x + y*y), outfact, efact;
594:   if (rr < r1 + PETSC_SQRT_MACHINE_EPSILON) {
595:     *outX = x; *outY = y;
596:   } else {
597:     const PetscReal xy[2] = {x,y}, sinphi=y/rr, cosphi=x/rr;
598:     PetscReal       cth,sth,xyprime[2],Rth[2][2],rotcos,newrr;
599:     if (num_sections==2) {
600:       rotcos = 0.70710678118654;
601:       outfact = 1.5; efact = 2.5;
602:       /* rotate normalized vector into [-pi/4,pi/4) */
603:       if (sinphi >= 0.) {         /* top cell, rotate by -pi/4 */
604:         cth = 0.707106781186548; sth = -0.707106781186548;
605:       } else {                    /* bottom cell, rotate by +pi/4 */
606:         cth = 0.707106781186548; sth = .707106781186548;
607:       }
608:     } else if (num_sections==3) {
609:       rotcos = 0.86602540378443;
610:       outfact = 1.5; efact = 2.5;
611:       /* rotate normalized vector into [-pi/6,pi/6) */
612:       if (sinphi >= 0.5) {         /* top cell, -pi/3 */
613:         cth = 0.5; sth = -0.866025403784439;
614:       } else if (sinphi >= -.5) {  /* mid cell 0 */
615:         cth = 1.; sth = .0;
616:       } else { /* bottom cell +pi/3 */
617:         cth = 0.5; sth = 0.866025403784439;
618:       }
619:     } else if (num_sections==4) {
620:       rotcos = 0.9238795325112;
621:       outfact = 1.5; efact = 3;
622:       /* rotate normalized vector into [-pi/8,pi/8) */
623:       if (sinphi >= 0.707106781186548) {         /* top cell, -3pi/8 */
624:         cth = 0.38268343236509; sth = -0.923879532511287;
625:       } else if (sinphi >= 0.) {                 /* mid top cell -pi/8 */
626:         cth = 0.923879532511287; sth = -.38268343236509;
627:       } else if (sinphi >= -0.707106781186548) { /* mid bottom cell + pi/8 */
628:         cth = 0.923879532511287; sth = 0.38268343236509;
629:       } else {                                   /* bottom cell + 3pi/8 */
630:         cth = 0.38268343236509; sth = .923879532511287;
631:       }
632:     } else {
633:       cth = 0.; sth = 0.; rotcos = 0; efact = 0;
634:     }
635:     Rth[0][0] = cth; Rth[0][1] =-sth;
636:     Rth[1][0] = sth; Rth[1][1] = cth;
637:     MATVEC2(Rth,xy,xyprime);
638:     if (num_sections==2) {
639:       newrr = xyprime[0]/rotcos;
640:     } else {
641:       PetscReal newcosphi=xyprime[0]/rr, rin = r1, rout = rr - rin;
642:       PetscReal routmax = r0*rotcos/newcosphi - rin, nroutmax = r0 - rin, routfrac = rout/routmax;
643:       newrr = rin + routfrac*nroutmax;
644:     }
645:     *outX = cosphi*newrr; *outY = sinphi*newrr;
646:     /* grade */
647:     PetscReal fact,tt,rs,re, rr = PetscSqrtReal(PetscSqr(*outX) + PetscSqr(*outY));
648:     if (rr > r2) { rs = r2; re = r0; fact = outfact;} /* outer zone */
649:     else {         rs = r1; re = r2; fact = efact;} /* electron zone */
650:     tt = (rs + PetscPowReal((rr - rs)/(re - rs),fact) * (re-rs)) / rr;
651:     *outX *= tt;
652:     *outY *= tt;
653:   }
654: }
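/* Editorial check of the grading step above: at rr == rs the scaling tt reduces
   to (rs + 0)/rr = 1, and at rr == re it is (rs + (re-rs))/rr = 1, so the zone
   boundaries r1, r2, r0 stay fixed and only interior points are redistributed
   by the exponent 'fact'. */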

656: static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx)
657: {
658:   LandauCtx   *ctx = (LandauCtx*)a_ctx;
659:   PetscReal   r = abc[0], z = abc[1];
660:   if (ctx->inflate) {
661:     PetscReal absR, absZ;
662:     absR = PetscAbs(r);
663:     absZ = PetscAbs(z);
664:     CircleInflate(ctx->i_radius,ctx->e_radius,ctx->radius,ctx->num_sections,absR,absZ,&absR,&absZ);
665:     r = (r > 0) ? absR : -absR;
666:     z = (z > 0) ? absZ : -absZ;
667:   }
668:   xyz[0] = r;
669:   xyz[1] = z;
670:   if (dim==3) xyz[2] = abc[2];

672:   return(0);
673: }

675: static PetscErrorCode ErrorIndicator_Simple(PetscInt dim, PetscReal volume, PetscReal x[], PetscInt Nc, const PetscInt Nf[], const PetscScalar u[], const PetscScalar u_x[], PetscReal *error, void *actx)
676: {
677:   PetscReal err = 0.0;
678:   PetscInt  f = *(PetscInt*)actx, j;
680:   for (j = 0; j < dim; ++j) {
681:     err += PetscSqr(PetscRealPart(u_x[f*dim+j]));
682:   }
683:   err = PetscRealPart(u[f]); /* overrides the gradient sum above; just use rho */
684:   *error = volume * err; /* * (ctx->axisymmetric ? 2.*PETSC_PI * r : 1); */
685:   return(0);
686: }

688: static PetscErrorCode LandauDMCreateVMesh(MPI_Comm comm, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM *dm)
689: {
691:   PetscReal      radius = ctx->radius;
692:   size_t         len;
693:   char           fname[128] = ""; /* we can add a file if we want */

696:   /* create DM */
697:   PetscStrlen(fname, &len);
698:   if (len) {
699:     PetscInt dim2;
700:     DMPlexCreateFromFile(comm, fname, ctx->interpolate, dm);
701:     DMGetDimension(*dm, &dim2);
702:     if (LANDAU_DIM != dim2) SETERRQ2(comm, PETSC_ERR_PLIB, "dim %D != LANDAU_DIM %d",dim2,LANDAU_DIM);
703:   } else {    /* p4est, quads */
704:     /* Create plex mesh of Landau domain */
705:     if (!ctx->sphere) {
706:       PetscInt       cells[] = {2,2,2};
707:       PetscReal      lo[] = {-radius,-radius,-radius}, hi[] = {radius,radius,radius};
708:       DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
709:       if (dim==2) { lo[0] = 0; cells[0] = 1; }
710:       DMPlexCreateBoxMesh(comm, dim, PETSC_FALSE, cells, lo, hi, periodicity, PETSC_TRUE, dm);
711:       DMLocalizeCoordinates(*dm); /* needed for periodic */
712:       if (dim==3) {PetscObjectSetName((PetscObject) *dm, "cube");}
713:       else {PetscObjectSetName((PetscObject) *dm, "half-plane");}
714:     } else if (dim==2) {
715:       PetscInt       numCells,cells[16][4],i,j;
716:       PetscInt       numVerts;
717:       PetscReal      inner_radius1 = ctx->i_radius, inner_radius2 = ctx->e_radius;
718:       PetscReal      *flatCoords = NULL;
719:       PetscInt       *flatCells = NULL, *pcell;
720:       if (ctx->num_sections==2) {
721: #if 1
722:         numCells = 5;
723:         numVerts = 10;
724:         int cells2[][4] = { {0,1,4,3},
725:                             {1,2,5,4},
726:                             {3,4,7,6},
727:                             {4,5,8,7},
728:                             {6,7,8,9} };
729:         for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
730:         PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
731:         {
732:           PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
733:           for (j = 0; j < numVerts-1; j++) {
734:             PetscReal z, r, theta = -PETSC_PI/2 + (j%3) * PETSC_PI/2;
735:             PetscReal rad = (j >= 6) ? inner_radius1 : (j >= 3) ? inner_radius2 : ctx->radius;
736:             z = rad * PetscSinReal(theta);
737:             coords[j][1] = z;
738:             r = rad * PetscCosReal(theta);
739:             coords[j][0] = r;
740:           }
741:           coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
742:         }
743: #else
744:         numCells = 4;
745:         numVerts = 8;
746:         static int     cells2[][4] = {{0,1,2,3},
747:                                       {4,5,1,0},
748:                                       {5,6,2,1},
749:                                       {6,7,3,2}};
750:         for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
751:         PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
752:         {
753:           PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
754:           PetscInt j;
755:           for (j = 0; j < 8; j++) {
756:             PetscReal z, r;
757:             PetscReal theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3.;
758:             PetscReal rad = ctx->radius * ((j < 4) ? 0.5 : 1.0);
759:             z = rad * PetscSinReal(theta);
760:             coords[j][1] = z;
761:             r = rad * PetscCosReal(theta);
762:             coords[j][0] = r;
763:           }
764:         }
765: #endif
766:       } else if (ctx->num_sections==3) {
767:         numCells = 7;
768:         numVerts = 12;
769:         int cells2[][4] = { {0,1,5,4},
770:                             {1,2,6,5},
771:                             {2,3,7,6},
772:                             {4,5,9,8},
773:                             {5,6,10,9},
774:                             {6,7,11,10},
775:                             {8,9,10,11} };
776:         for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
777:         PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
778:         {
779:           PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
780:           for (j = 0; j < numVerts; j++) {
781:             PetscReal z, r, theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3;
782:             PetscReal rad = (j >= 8) ? inner_radius1 : (j >= 4) ? inner_radius2 : ctx->radius;
783:             z = rad * PetscSinReal(theta);
784:             coords[j][1] = z;
785:             r = rad * PetscCosReal(theta);
786:             coords[j][0] = r;
787:           }
788:         }
789:       } else if (ctx->num_sections==4) {
790:         numCells = 10;
791:         numVerts = 16;
792:         int cells2[][4] = { {0,1,6,5},
793:                             {1,2,7,6},
794:                             {2,3,8,7},
795:                             {3,4,9,8},
796:                             {5,6,11,10},
797:                             {6,7,12,11},
798:                             {7,8,13,12},
799:                             {8,9,14,13},
800:                             {10,11,12,15},
801:                             {12,13,14,15}};
802:         for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
803:         PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
804:         {
805:           PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
806:           for (j = 0; j < numVerts-1; j++) {
807:             PetscReal z, r, theta = -PETSC_PI/2 + (j%5) * PETSC_PI/4;
808:             PetscReal rad = (j >= 10) ? inner_radius1 : (j >= 5) ? inner_radius2 : ctx->radius;
809:             z = rad * PetscSinReal(theta);
810:             coords[j][1] = z;
811:             r = rad * PetscCosReal(theta);
812:             coords[j][0] = r;
813:           }
814:           coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
815:         }
816:       } else {
817:         numCells = 0;
818:         numVerts = 0;
819:       }
820:       for (j = 0, pcell = flatCells; j < numCells; j++, pcell += 4) {
821:         pcell[0] = cells[j][0]; pcell[1] = cells[j][1];
822:         pcell[2] = cells[j][2]; pcell[3] = cells[j][3];
823:       }
824:       DMPlexCreateFromCellListPetsc(comm,2,numCells,numVerts,4,ctx->interpolate,flatCells,2,flatCoords,dm);
825:       PetscFree2(flatCoords,flatCells);
826:       PetscObjectSetName((PetscObject) *dm, "semi-circle");
827:     } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space meshes do not support the cubed sphere");
828:   }
829:   PetscObjectSetOptionsPrefix((PetscObject)*dm,prefix);

831:   DMSetFromOptions(*dm); /* Plex refine */

833:   { /* p4est? */
834:     char      convType[256];
835:     PetscBool flg;
836:     PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");
837:     PetscOptionsFList("-dm_landau_type","Convert DMPlex to another format (should not be Plex!)","plexland.c",DMList,DMPLEX,convType,256,&flg);
838:     PetscOptionsEnd();
839:     if (flg) {
840:       DM dmforest;
841:       DMConvert(*dm,convType,&dmforest);
842:       if (dmforest) {
843:         PetscBool isForest;
844:         if (dmforest->prealloc_only != (*dm)->prealloc_only) SETERRQ(PetscObjectComm((PetscObject)dm),PETSC_ERR_SUP,"plex->prealloc_only != dm->prealloc_only");
845:         PetscObjectSetOptionsPrefix((PetscObject)dmforest,prefix);
846:         DMIsForest(dmforest,&isForest);
847:         if (isForest) {
848:           if (ctx->sphere && ctx->inflate) {
849:             DMForestSetBaseCoordinateMapping(dmforest,GeometryDMLandau,ctx);
850:           }
851:           if (dmforest->prealloc_only != (*dm)->prealloc_only) SETERRQ(PetscObjectComm((PetscObject)dm),PETSC_ERR_SUP,"plex->prealloc_only != dm->prealloc_only");
852:           DMDestroy(dm);
853:           *dm = dmforest;
854:           ctx->errorIndicator = ErrorIndicator_Simple; /* flag for Forest */
855:         } else SETERRQ(ctx->comm, PETSC_ERR_USER, "Converted to non Forest?");
856:       } else SETERRQ(ctx->comm, PETSC_ERR_USER, "Convert failed?");
857:     }
858:   }
859:   PetscObjectSetName((PetscObject) *dm, "Mesh");
860:   return(0);
861: }

863: static PetscErrorCode SetupDS(DM dm, PetscInt dim, LandauCtx *ctx)
864: {
865:   PetscErrorCode  ierr;
866:   PetscInt        ii;
868:   for (ii=0;ii<ctx->num_species;ii++) {
869:     char     buf[256];
870:     if (ii==0) PetscSNPrintf(buf, 256, "e");
871:     else {PetscSNPrintf(buf, 256, "i%D", ii);}
872:     /* Setup Discretization - FEM */
873:     PetscFECreateDefault(PetscObjectComm((PetscObject) dm), dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &ctx->fe[ii]);
874:     PetscObjectSetName((PetscObject) ctx->fe[ii], buf);
875:     DMSetField(dm, ii, NULL, (PetscObject) ctx->fe[ii]);
876:   }
877:   DMCreateDS(dm);
878:   if (1) {
879:     PetscInt        ii;
880:     PetscSection    section;
881:     DMGetSection(dm, &section);
882:     for (ii=0;ii<ctx->num_species;ii++){
883:       char buf[256];
884:       if (ii==0) PetscSNPrintf(buf, 256, "se");
885:       else PetscSNPrintf(buf, 256, "si%D", ii);
886:       PetscSectionSetComponentName(section, ii, 0, buf);
887:     }
888:   }
889:   return(0);
890: }

892: /* Define a Maxwellian function for testing out the operator. */

894: /* Using Cartesian velocity space coordinates, the particle */
895: /* density, [1/m^3], is defined according to */

897: /* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */

899: /* Using some constant, c, we normalize the velocity vector into a */
900: /* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */

902: /* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */

904: /* Defining $\theta=2T/mc^2$, we thus find that the probability density */
905: /* for finding the particle in a box dx^3 around x is */

907: /* $$ f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] $$ */
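/* Editorial check (follows from the definition above): the distribution is */
/* normalized, since \int_{R^3} f(x;\theta) dx^3 */
/*   = (\pi\theta)^{-3/2} (\int e^{-t^2/\theta} dt)^3 = (\pi\theta)^{-3/2}(\pi\theta)^{3/2} = 1. */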

909: typedef struct {
910:   LandauCtx   *ctx;
911:   PetscReal kT_m;
912:   PetscReal n;
913:   PetscReal shift;
914: } MaxwellianCtx;

916: static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
917: {
918:   MaxwellianCtx *mctx = (MaxwellianCtx*)actx;
919:   LandauCtx     *ctx = mctx->ctx;
920:   PetscInt      i;
921:   PetscReal     v2 = 0, theta = 2*mctx->kT_m/(ctx->v_0*ctx->v_0); /* theta = 2kT/mc^2 */
923:   /* compute the exponents, v^2 */
924:   for (i = 0; i < dim; ++i) v2 += x[i]*x[i];
925:   /* evaluate the Maxwellian */
926:   u[0] = mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
927:   if (mctx->shift!=0.) {
928:     v2 = 0;
929:     for (i = 0; i < dim-1; ++i) v2 += x[i]*x[i];
930:     v2 += (x[dim-1]-mctx->shift)*(x[dim-1]-mctx->shift);
931:     /* evaluate the shifted Maxwellian */
932:     u[0] += mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
933:   }
934:   return(0);
935: }

937: /*@
938:  LandauAddMaxwellians - Add a Maxwellian distribution to a state

940:  Collective on X

942:  Input Parameters:
943:  +   dm - The mesh
944:  .   time - Current time
945:  .   temps - Temperatures of each species
946:  .   ns - Number density of each species
947:  -   actx - Landau context

949:  Output Parameter:
950:  .   X  - The state

952:  Level: beginner

954:  .keywords: mesh
955:  .seealso: LandauCreateVelocitySpace()
956:  @*/
957: PetscErrorCode LandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], void *actx)
958: {
959:   LandauCtx      *ctx = (LandauCtx*)actx;
960:   PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *);
961:   PetscErrorCode ierr;
962:   PetscInt       ii,dim;
963:   MaxwellianCtx  *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];

966:   DMGetDimension(dm, &dim);
967:   if (!ctx) { DMGetApplicationContext(dm, &ctx); }
968:   for (ii=0;ii<ctx->num_species;ii++) {
969:     mctxs[ii] = &data[ii];
970:     data[ii].ctx = ctx;
971:     data[ii].kT_m = ctx->k*temps[ii]/ctx->masses[ii]; /* kT/m */
972:     data[ii].n = ns[ii];
973:     initu[ii] = maxwellian;
974:     data[ii].shift = 0;
975:   }
976:   data[0].shift = ctx->electronShift;
977:   /* need to make ADD_ALL_VALUES work - TODO */
978:   DMProjectFunction(dm, time, initu, (void**)mctxs, INSERT_ALL_VALUES, X);
979:   return(0);
980: }
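/* Editorial usage sketch (hypothetical values, not from the original source):

     PetscReal temps[2] = {1., 1.}, ns[2] = {1., 1.}; // electrons + one ion species
     ierr = LandauAddMaxwellians(dm, X, 0.0, temps, ns, ctx);CHKERRQ(ierr);

   One Maxwellian is projected per species, so temps[] and ns[] must have at
   least ctx->num_species entries. */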

982: /*
983:  LandauSetInitialCondition - Adds Maxwellians with context

985:  Collective on X

987:  Input Parameters:
988:  +   dm - The mesh
989:  -   actx - Landau context with T and n

991:  Output Parameter:
992:  .   X  - The state

994:  Level: beginner

996:  .keywords: mesh
997:  .seealso: LandauCreateVelocitySpace(), LandauAddMaxwellians()
998:  */
999: static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, void *actx)
1000: {
1001:   LandauCtx        *ctx = (LandauCtx*)actx;
1004:   if (!ctx) { DMGetApplicationContext(dm, &ctx); }
1005:   VecZeroEntries(X);
1006:   LandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, ctx);
1007:   return(0);
1008: }

1010: static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscReal refineTol[], PetscReal coarsenTol[], PetscInt type, LandauCtx *ctx, DM *newDM)
1011: {
1012:   DM               dm, plex, adaptedDM = NULL;
1013:   PetscDS          prob;
1014:   PetscBool        isForest;
1015:   PetscQuadrature  quad;
1016:   PetscInt         Nq, *Nb, cStart, cEnd, c, dim, qj, k;
1017:   DMLabel          adaptLabel = NULL;
1018:   PetscErrorCode   ierr;

1021:   VecGetDM(sol, &dm);
1022:   DMCreateDS(dm);
1023:   DMGetDS(dm, &prob);
1024:   DMGetDimension(dm, &dim);
1025:   DMIsForest(dm, &isForest);
1026:   DMConvert(dm, DMPLEX, &plex);
1027:   DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);
1028:   DMLabelCreate(PETSC_COMM_SELF,"adapt",&adaptLabel);
1029:   PetscFEGetQuadrature(fem, &quad);
1030:   PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL);
1031:   if (Nq >LANDAU_MAX_NQ) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %D > LANDAU_MAX_NQ (%D)",Nq,LANDAU_MAX_NQ);
1032:   PetscDSGetDimensions(prob, &Nb);
1033:   if (type==4) {
1034:     for (c = cStart; c < cEnd; c++) {
1035:       DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE);
1036:     }
1037:     PetscInfo1(sol, "Phase:%s: Uniform refinement\n","adaptToleranceFEM");
1038:   } else if (type==2) {
1039:     PetscInt  rCellIdx[8], eCellIdx[64], iCellIdx[64], eMaxIdx = -1, iMaxIdx = -1, nr = 0, nrmax = (dim==3) ? 8 : 2;
1040:     PetscReal minRad = PETSC_INFINITY, r, eMinRad = PETSC_INFINITY, iMinRad = PETSC_INFINITY;
1041:     for (c = 0; c < 64; c++) { eCellIdx[c] = iCellIdx[c] = -1; }
1042:     for (c = cStart; c < cEnd; c++) {
1043:       PetscReal    tt, v0[LANDAU_MAX_NQ*3], detJ[LANDAU_MAX_NQ];
1044:       DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ);
1045:       for (qj = 0; qj < Nq; ++qj) {
1046:         tt = PetscSqr(v0[dim*qj+0]) + PetscSqr(v0[dim*qj+1]) + PetscSqr(((dim==3) ? v0[dim*qj+2] : 0));
1047:         r = PetscSqrtReal(tt);
1048:         if (r < minRad - PETSC_SQRT_MACHINE_EPSILON*10.) {
1049:           minRad = r;
1050:           nr = 0;
1051:           rCellIdx[nr++]= c;
1052:           PetscInfo4(sol, "\t\tPhase: adaptToleranceFEM Found first inner r=%e, cell %D, qp %D/%D\n", r, c, qj+1, Nq);
1053:         } else if ((r-minRad) < PETSC_SQRT_MACHINE_EPSILON*100. && nr < nrmax) {
1054:           for (k=0;k<nr;k++) if (c == rCellIdx[k]) break;
1055:           if (k==nr) {
1056:             rCellIdx[nr++]= c;
1057:             PetscInfo5(sol, "\t\t\tPhase: adaptToleranceFEM Found another inner r=%e, cell %D, qp %D/%D, d=%e\n", r, c, qj+1, Nq, r-minRad);
1058:           }
1059:         }
1060:         if (ctx->sphere) {
1061:           if ((tt=r-ctx->e_radius) > 0) {
1062:             PetscInfo2(sol, "\t\t\t %D cell r=%g\n",c,tt);
1063:             if (tt < eMinRad - PETSC_SQRT_MACHINE_EPSILON*100.) {
1064:               eMinRad = tt;
1065:               eMaxIdx = 0;
1066:               eCellIdx[eMaxIdx++] = c;
1067:             } else if (eMaxIdx > 0 && (tt-eMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != eCellIdx[eMaxIdx-1]) {
1068:               eCellIdx[eMaxIdx++] = c;
1069:             }
1070:           }
1071:           if ((tt=r-ctx->i_radius) > 0) {
1072:             if (tt < iMinRad - 1.e-5) {
1073:               iMinRad = tt;
1074:               iMaxIdx = 0;
1075:               iCellIdx[iMaxIdx++] = c;
1076:             } else if (iMaxIdx > 0 && (tt-iMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != iCellIdx[iMaxIdx-1]) {
1077:               iCellIdx[iMaxIdx++] = c;
1078:             }
1079:           }
1080:         }
1081:       }
1082:     }
1083:     for (k=0;k<nr;k++) {
1084:       DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE);
1085:     }
1086:     if (ctx->sphere) {
1087:       for (c = 0; c < eMaxIdx; c++) {
1088:         DMLabelSetValue(adaptLabel, eCellIdx[c], DM_ADAPT_REFINE);
1089:         PetscInfo3(sol, "\t\tPhase:%s: refine sphere e cell %D r=%g\n","adaptToleranceFEM",eCellIdx[c],eMinRad);
1090:       }
1091:       for (c = 0; c < iMaxIdx; c++) {
1092:         DMLabelSetValue(adaptLabel, iCellIdx[c], DM_ADAPT_REFINE);
1093:         PetscInfo3(sol, "\t\tPhase:%s: refine sphere i cell %D r=%g\n","adaptToleranceFEM",iCellIdx[c],iMinRad);
1094:       }
1095:     }
1096:     PetscInfo4(sol, "Phase:%s: Adaptive refine origin cells %D,%D r=%g\n","adaptToleranceFEM",rCellIdx[0],rCellIdx[1],minRad);
1097:   } else if (type==0 || type==1 || type==3) { /* refine along r=0 axis */
1098:     PetscScalar  *coef = NULL;
1099:     Vec          coords;
1100:     PetscInt     csize,Nv,d,nz;
1101:     DM           cdm;
1102:     PetscSection cs;
1103:     DMGetCoordinatesLocal(dm, &coords);
1104:     DMGetCoordinateDM(dm, &cdm);
1105:     DMGetLocalSection(cdm, &cs);
1106:     for (c = cStart; c < cEnd; c++) {
1107:       PetscInt doit = 0, outside = 0;
1108:       DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef);
1109:       Nv = csize/dim;
1110:       for (nz = d = 0; d < Nv; d++) {
1111:         PetscReal z = PetscRealPart(coef[d*dim + (dim-1)]), x = PetscSqr(PetscRealPart(coef[d*dim + 0])) + ((dim==3) ? PetscSqr(PetscRealPart(coef[d*dim + 1])) : 0);
1112:         x = PetscSqrtReal(x);
1113:         if (x < PETSC_MACHINE_EPSILON*10. && PetscAbs(z)<PETSC_MACHINE_EPSILON*10.) doit = 1;             /* refine origin */
1114:         else if (type==0 && (z < -PETSC_MACHINE_EPSILON*10. || z > ctx->re_radius+PETSC_MACHINE_EPSILON*10.)) outside++;   /* first pass don't refine bottom */
1115:         else if (type==1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) outside++; /* don't refine outside electron refine radius */
1116:         else if (type==3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) outside++; /* don't refine outside ion refine radius */
1117:         if (x < PETSC_MACHINE_EPSILON*10.) nz++;
1118:       }
1119:       DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef);
1120:       if (doit || (outside<Nv && nz)) {
1121:         DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE);
1122:       }
1123:     }
1124:     PetscInfo1(sol, "Phase:%s: RE refinement\n","adaptToleranceFEM");
1125:   }
1126:   /* VecDestroy(&locX); */
1127:   DMDestroy(&plex);
1128:   DMAdaptLabel(dm, adaptLabel, &adaptedDM);
1129:   DMLabelDestroy(&adaptLabel);
1130:   *newDM = adaptedDM;
1131:   if (adaptedDM) {
1132:     if (isForest) {
1133:       DMForestSetAdaptivityForest(adaptedDM,NULL);
1134:     }
1135:     DMConvert(adaptedDM, DMPLEX, &plex);
1136:     DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);
1137:     PetscInfo2(sol, "\tPhase: adaptToleranceFEM: %D cells, %d total quadrature points\n",cEnd-cStart,Nq*(cEnd-cStart));
1138:     DMDestroy(&plex);
1139:   }
1140:   return(0);
1141: }
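/* Editorial summary of the 'type' argument above, matching the order of
   limits[] in adapt() below (a restatement of existing behavior):
     type 0 - refine along v_perp=0, z>0 for runaways (numRERefine passes)
     type 1 - refine along v_perp=0 within vperp0_radius1 (nZRefine1 passes)
     type 2 - refine cells nearest the origin and sphere radii (maxRefIts passes)
     type 3 - refine along v_perp=0 within vperp0_radius2 (nZRefine2 passes)
     type 4 - uniform refinement (postAMRRefine passes) */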

1143: static PetscErrorCode adapt(DM *dm, LandauCtx *ctx, Vec *uu)
1144: {
1145:   PetscErrorCode  ierr;
1146:   PetscInt        type, limits[5] = {ctx->numRERefine,ctx->nZRefine1,ctx->maxRefIts,ctx->nZRefine2,ctx->postAMRRefine};
1147:   PetscInt        adaptIter;

1150:   for (type=0;type<5;type++) {
1151:     for (adaptIter = 0; adaptIter<limits[type];adaptIter++) {
1152:       DM  dmNew = NULL;
1153:       adaptToleranceFEM(ctx->fe[0], *uu, ctx->refineTol, ctx->coarsenTol, type, ctx, &dmNew);
1154:       if (!dmNew) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"should not happen");
1155:       else {
1156:         DMDestroy(dm);
1157:         VecDestroy(uu);
1158:         DMCreateGlobalVector(dmNew,uu);
1159:         PetscObjectSetName((PetscObject) *uu, "u");
1160:         LandauSetInitialCondition(dmNew, *uu, ctx);
1161:         *dm = dmNew;
1162:       }
1163:     }
1164:   }
1165:   return(0);
1166: }

1168: static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[])
1169: {
1170:   PetscErrorCode    ierr;
1171:   PetscBool         flg, sph_flg;
1172:   PetscInt          ii,nt,nm,nc;
1173:   DM                dummy;

1176:   DMCreate(ctx->comm,&dummy);
1177:   /* get options - initialize context */
1178:   ctx->normJ = 0;
1179:   ctx->verbose = 1;
1180:   ctx->interpolate = PETSC_TRUE;
1181:   ctx->gpu_assembly = PETSC_TRUE;
1182:   ctx->sphere = PETSC_FALSE;
1183:   ctx->inflate = PETSC_FALSE;
1184:   ctx->electronShift = 0;
1185:   ctx->errorIndicator = NULL;
1186:   ctx->radius = 5.; /* electron thermal radius (velocity) */
1187:   ctx->re_radius = 0.;
1188:   ctx->vperp0_radius1 = 0;
1189:   ctx->vperp0_radius2 = 0;
1190:   ctx->e_radius = .1;
1191:   ctx->i_radius = .01;
1192:   ctx->maxRefIts = 5;
1193:   ctx->postAMRRefine = 0;
1194:   ctx->nZRefine1 = 0;
1195:   ctx->nZRefine2 = 0;
1196:   ctx->numRERefine = 0;
1197:   ctx->aux_bool = PETSC_FALSE;
1198:   ctx->num_sections = 3; /* 2, 3 or 4 */
1199:   /* species - [0] electrons, [1] one ion species, e.g., deuterium, [2] heavy impurity ion, ... */
1200:   ctx->charges[0] = -1;  /* electron charge (MKS) */
1201:   ctx->masses[0] = 1/1835.5; /* temporary value, in units of proton mass */
1202:   ctx->n[0] = 1;
1203:   ctx->thermal_temps[0] = 1;
1204:   /* constants, etc. */
1205:   ctx->epsilon0 = 8.8542e-12; /* permittivity of free space (MKS) F/m */
1206:   ctx->k = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */
1207:   ctx->lnLam = 10;         /* Coulomb logarithm: ratio of large- to small-angle collision cross sections */
1208:   ctx->n_0 = 1.e20;        /* typical plasma n, but could set it to 1 */
1209:   ctx->Ez = 0;
1210:   ctx->v_0 = 1; /* in electron thermal velocity */
1211:   ctx->subThreadBlockSize = 1; /* for device and maybe OMP */
1212:   ctx->numConcurrency = 1; /* for device */
1213:   PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none");
1214:   {
1215:     char opstring[256];
1216: #if defined(PETSC_HAVE_KOKKOS)
1217:     ctx->deviceType = LANDAU_KOKKOS;
1218:     PetscStrcpy(opstring,"kokkos");
1219: #if defined(PETSC_HAVE_CUDA)
1220:     ctx->subThreadBlockSize = 8;
1221: #endif
1222: #elif defined(PETSC_HAVE_CUDA)
1223:     ctx->deviceType = LANDAU_CUDA;
1224:     PetscStrcpy(opstring,"cuda");
1225: #else
1226:     ctx->deviceType = LANDAU_CPU;
1227:     PetscStrcpy(opstring,"cpu");
1228:     ctx->subThreadBlockSize = 0;
1229: #endif
1230:     PetscOptionsString("-dm_landau_device_type","Use kernels on 'cpu', 'cuda', or 'kokkos'","plexland.c",opstring,opstring,256,NULL);
1231:     PetscStrcmp("cpu",opstring,&flg);
1232:     if (flg) {
1233:       ctx->deviceType = LANDAU_CPU;
1234:       ctx->subThreadBlockSize = 0;
1235:     } else {
1236:       PetscStrcmp("cuda",opstring,&flg);
1237:       if (flg) {
1238:         ctx->deviceType = LANDAU_CUDA;
1239:         ctx->subThreadBlockSize = 0;
1240:       } else {
1241:         PetscStrcmp("kokkos",opstring,&flg);
1242:         if (flg) ctx->deviceType = LANDAU_KOKKOS;
1243:         else SETERRQ1(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_device_type %s",opstring);
1244:       }
1245:     }
1246:   }
1247:   PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL);
1248:   PetscOptionsReal("-dm_landau_electron_shift","Shift in thermal velocity of electrons","none",ctx->electronShift,&ctx->electronShift, NULL);
1249:   PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, &sph_flg);
1250:   PetscOptionsBool("-dm_landau_inflate", "With sphere, inflate for curved edges (no AMR)", "plexland.c", ctx->inflate, &ctx->inflate, NULL);
1251:   PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, NULL);
1252:   PetscOptionsInt("-dm_landau_amr_z_refine1",  "Number of levels to refine along v_perp=0 before origin AMR", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, NULL);
1253:   PetscOptionsInt("-dm_landau_amr_z_refine2",  "Number of levels to refine along v_perp=0 after origin AMR", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, NULL);
1254:   PetscOptionsInt("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin after r=0 refinements", "plexland.c", ctx->maxRefIts, &ctx->maxRefIts, NULL);
1255:   PetscOptionsInt("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &ctx->postAMRRefine, NULL);
1256:   PetscOptionsInt("-dm_landau_verbose", "Level of verbosity for diagnostic output", "plexland.c", ctx->verbose, &ctx->verbose, NULL);
1257:   PetscOptionsReal("-dm_landau_re_radius","velocity range to refine on positive (z>0) r=0 axis for runaways","plexland.c",ctx->re_radius,&ctx->re_radius, &flg);
1258:   PetscOptionsReal("-dm_landau_z_radius1","velocity range to refine r=0 axis (for electrons)","plexland.c",ctx->vperp0_radius1,&ctx->vperp0_radius1, &flg);
1259:   PetscOptionsReal("-dm_landau_z_radius2","velocity range to refine r=0 axis (for ions) after origin AMR","plexland.c",ctx->vperp0_radius2,&ctx->vperp0_radius2, &flg);
1260:   PetscOptionsReal("-dm_landau_Ez","Initial parallel electric field in units of the Connor-Hastie critical field","plexland.c",ctx->Ez,&ctx->Ez, NULL);
1261:   PetscOptionsReal("-dm_landau_n_0","Normalization constant for number density","plexland.c",ctx->n_0,&ctx->n_0, NULL);
1262:   PetscOptionsReal("-dm_landau_ln_lambda","Cross section parameter","plexland.c",ctx->lnLam,&ctx->lnLam, NULL);
1263:   PetscOptionsInt("-dm_landau_num_sections", "Number of tangential sections in (2D) grid: 2, 3, or 4", "plexland.c", ctx->num_sections, &ctx->num_sections, NULL);
1264:   PetscOptionsInt("-dm_landau_num_thread_teams", "The number of other concurrent runs to make room for", "plexland.c", ctx->numConcurrency, &ctx->numConcurrency, NULL);

1266:   /* get the number of species from the list of thermal temperatures */
1267:   {
1268:     PetscReal arr[100];
1269:     nt = 100;
1270:     PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV", "plexland.c", arr, &nt, &flg);
1271:     if (flg && nt > LANDAU_MAX_SPECIES) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_thermal_temps t1,t2,...: number of species %D > MAX %D",nt,LANDAU_MAX_SPECIES);
1272:   }
1273:   nt = LANDAU_MAX_SPECIES;
1274:   for (ii=1;ii<LANDAU_MAX_SPECIES;ii++) {
1275:     ctx->thermal_temps[ii] = 1.;
1276:     ctx->charges[ii] = 1;
1277:     ctx->masses[ii] = 1;
1278:     ctx->n[ii] = (ii==1) ? 1 : 0;
1279:   }
1280:   PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg);
1281:   if (flg) {
1282:     PetscInfo1(dummy, "num_species set to number of thermal temps provided (%D)\n",nt);
1283:     ctx->num_species = nt;
1284:   } else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_thermal_temps t1,t2,... must be provided to set the number of species");
1285:   for (ii=0;ii<ctx->num_species;ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */
1286:   nm = LANDAU_MAX_SPECIES-1;
1287:   PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg);
1288:   if (flg && nm != ctx->num_species-1) {
1289:     SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"num ion masses %D != num species %D",nm,ctx->num_species-1);
1290:   }
1291:   nm = LANDAU_MAX_SPECIES;
1292:   PetscOptionsRealArray("-dm_landau_n", "Normalized (by -n_0) number density of each species", "plexland.c", ctx->n, &nm, &flg);
1293:   if (flg && nm != ctx->num_species) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"wrong num n: %D != num species %D",nm,ctx->num_species);
1294:   ctx->n_0 *= ctx->n[0]; /* normalized number density */
1295:   for (ii=1;ii<ctx->num_species;ii++) ctx->n[ii] = ctx->n[ii]/ctx->n[0];
1296:   ctx->n[0] = 1;
1297:   for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */
1298:   ctx->masses[0] = 9.10938356e-31; /* electron mass kg (should be about right already) */
1299:   ctx->m_0 = ctx->masses[0]; /* arbitrary reference mass, electrons */
1300:   PetscOptionsReal("-dm_landau_v_0","Velocity to normalize with in units of initial electrons thermal velocity (not recommended to change default)","plexland.c",ctx->v_0,&ctx->v_0, NULL);
1301:   ctx->v_0 *= PetscSqrtReal(ctx->k*ctx->thermal_temps[0]/(ctx->masses[0])); /* electron mean velocity in 1D (need 3D form in computing T from FE integral) */
1302:   nc = LANDAU_MAX_SPECIES-1;
1303:   PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg);
1304:   if (flg && nc != ctx->num_species-1) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"num charges %D != num species %D",nc,ctx->num_species-1);
1305:   for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */
1306:   ctx->t_0 = 8*PETSC_PI*PetscSqr(ctx->epsilon0*ctx->m_0/PetscSqr(ctx->charges[0]))/ctx->lnLam/ctx->n_0*PetscPowReal(ctx->v_0,3); /* note, this t_0 makes nu[0,0]=1 */
1307:   /* geometry */
1308:   for (ii=0;ii<ctx->num_species;ii++) ctx->refineTol[ii]  = PETSC_MAX_REAL;
1309:   for (ii=0;ii<ctx->num_species;ii++) ctx->coarsenTol[ii] = 0.;
1310:   ii = LANDAU_MAX_SPECIES;
1311:   PetscOptionsRealArray("-dm_landau_refine_tol","tolerance for refining cells in AMR","plexland.c",ctx->refineTol, &ii, &flg);
1312:   if (flg && ii != ctx->num_species) PetscInfo2(dummy, "Phase: Warning, #refine_tol %D != num_species %D\n",ii,ctx->num_species);
1313:   ii = LANDAU_MAX_SPECIES;
1314:   PetscOptionsRealArray("-dm_landau_coarsen_tol","tolerance for coarsening cells in AMR","plexland.c",ctx->coarsenTol, &ii, &flg);
1315:   if (flg && ii != ctx->num_species) PetscInfo2(dummy, "Phase: Warning, #coarsen_tol %D != num_species %D\n",ii,ctx->num_species);
1316:   PetscOptionsReal("-dm_landau_domain_radius","Phase space size in units of electron thermal velocity","plexland.c",ctx->radius,&ctx->radius, &flg);
1317:   if (flg && ctx->radius <= 0) { /* negative is ratio of c */
1318:     if (ctx->radius == 0) ctx->radius = 0.75;
1319:     else ctx->radius = -ctx->radius;
1320:     ctx->radius = ctx->radius*299792458.0/ctx->v_0;
1321:     PetscInfo1(dummy, "Change domain radius to %e\n",ctx->radius);
1322:   }
1323:   PetscOptionsReal("-dm_landau_i_radius","Ion thermal velocity, used for circular meshes","plexland.c",ctx->i_radius,&ctx->i_radius, &flg);
1324:   if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* an ion radius was given without -dm_landau_sphere; assume a sphere mesh was intended */
1325:   if (!flg) {
1326:     ctx->i_radius = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[1]/ctx->masses[1]/PETSC_PI)/ctx->v_0; /* normalized radius with thermal velocity of first ion */
1327:   }
1328:   PetscOptionsReal("-dm_landau_e_radius","Electron thermal velocity, used for circular meshes","plexland.c",ctx->e_radius,&ctx->e_radius, &flg);
1329:   if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* an electron radius was given without -dm_landau_sphere; assume a sphere mesh was intended */
1330:   if (!flg) {
1331:     ctx->e_radius = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[0]/ctx->masses[0]/PETSC_PI)/ctx->v_0; /* normalized radius with thermal velocity of electrons */
1332:   }
1333:   if (ctx->sphere && (ctx->e_radius <= ctx->i_radius || ctx->radius <= ctx->e_radius)) SETERRQ3(ctx->comm,PETSC_ERR_ARG_WRONG,"bad radii: %g < %g < %g",ctx->i_radius,ctx->e_radius,ctx->radius);
1334:   PetscOptionsInt("-dm_landau_sub_thread_block_size", "Number of threads in Kokkos integration point subblock", "plexland.c", ctx->subThreadBlockSize, &ctx->subThreadBlockSize, NULL);
1335:   PetscOptionsEnd();
1336:   for (ii=ctx->num_species;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] = ctx->thermal_temps[ii]  = ctx->charges[ii] = 0;
1337:   if (ctx->verbose > 0) {
1338:     PetscPrintf(ctx->comm, "masses:        e=%10.3e; ions in proton mass units:   %10.3e %10.3e ...\n",ctx->masses[0],ctx->masses[1]/1.6720e-27,ctx->num_species>2 ? ctx->masses[2]/1.6720e-27 : 0);
1339:     PetscPrintf(ctx->comm, "charges:       e=%10.3e; charges in elementary units: %10.3e %10.3e\n", ctx->charges[0],-ctx->charges[1]/ctx->charges[0],ctx->num_species>2 ? -ctx->charges[2]/ctx->charges[0] : 0);
1340:     PetscPrintf(ctx->comm, "thermal T (K): e=%10.3e i=%10.3e imp=%10.3e. v_0=%10.3e n_0=%10.3e t_0=%10.3e domain=%10.3e\n",ctx->thermal_temps[0],ctx->thermal_temps[1],ctx->num_species>2 ? ctx->thermal_temps[2] : 0,ctx->v_0,ctx->n_0,ctx->t_0,ctx->radius);
1341:   }
1342:   DMDestroy(&dummy);
1343:   {
1344:     PetscMPIInt    rank;
1345:     MPI_Comm_rank(ctx->comm, &rank);
1346:     /* PetscLogStage  setup_stage; */
1347:     PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0]); /* 0 */
1348:     PetscLogEventRegister(" Initialize", DM_CLASSID, &ctx->events[10]); /* 10 */
1349:     PetscLogEventRegister("  IP Data-jac", DM_CLASSID, &ctx->events[7]); /* 7 */
1350:     PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3]); /* 3 */
1351:     PetscLogEventRegister(" GPU Kernel", DM_CLASSID, &ctx->events[4]); /* 4 */
1352:     PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5]); /* 5 */
1353:     PetscLogEventRegister(" Jac-assemble", DM_CLASSID, &ctx->events[6]); /* 6 */
1354:     PetscLogEventRegister(" Jac-f-df", DM_CLASSID, &ctx->events[8]); /* 8 */
1355:     PetscLogEventRegister(" Jac asmbl setup", DM_CLASSID, &ctx->events[2]); /* 2 */
1356:     PetscLogEventRegister("Mass Operator", DM_CLASSID, &ctx->events[9]); /* 9 */
1357:     PetscLogEventRegister("  IP Data-mass", DM_CLASSID, &ctx->events[1]); /* 1 */

1359:     if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */
1360:       PetscOptionsClearValue(NULL,"-snes_converged_reason");
1361:       PetscOptionsClearValue(NULL,"-ksp_converged_reason");
1362:       PetscOptionsClearValue(NULL,"-snes_monitor");
1363:       PetscOptionsClearValue(NULL,"-ksp_monitor");
1364:       PetscOptionsClearValue(NULL,"-ts_monitor");
1365:       PetscOptionsClearValue(NULL,"-ts_adapt_monitor");
1366:       PetscOptionsClearValue(NULL,"-dm_landau_amr_dm_view");
1367:       PetscOptionsClearValue(NULL,"-dm_landau_amr_vec_view");
1368:       PetscOptionsClearValue(NULL,"-dm_landau_pre_dm_view");
1369:       PetscOptionsClearValue(NULL,"-dm_landau_pre_vec_view");
1370:       PetscOptionsClearValue(NULL,"-info");
1371:     }
1372:   }
1373:   return(0);
1374: }
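
/*
  A sketch of a species setup consumed by ProcessOptions() above (the driver
  name and all values are illustrative assumptions, not canonical):

    ./ex1 -dm_landau_device_type cpu \
          -dm_landau_thermal_temps 2,1 \
          -dm_landau_ion_masses 2 \
          -dm_landau_ion_charges 1 \
          -dm_landau_n 1,1

  -dm_landau_thermal_temps is the one mandatory option: its length sets
  num_species (here electrons plus one deuterium-like ion). Temperatures are
  given in keV and converted to Kelvin above; ion masses are in proton masses
  and charges in elementary charges, both scaled to MKS above.
*/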

1376: /*@C
1377:  LandauCreateVelocitySpace - Create a DMPlex velocity space mesh

1379:  Collective on comm

1381:  Input Parameters:
1382:  +   comm  - The MPI communicator
1383:  .   dim - velocity space dimension (2 for axisymmetric, 3 for the full 3V solver)
1384:  -   prefix - prefix for options

1386:  Output Parameters:
1387:  +   dm  - The DM object representing the mesh
1388:  .   X - A vector (user destroys)
1389:  -   J - Optional matrix (object destroys)

1391:  Level: beginner

1393:  .keywords: mesh
1394:  .seealso: DMPlexCreate(), LandauDestroyVelocitySpace()
1395:  @*/
1396: PetscErrorCode LandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *dm)
1397: {
1399:   LandauCtx      *ctx;
1400:   PetscBool      prealloc_only,flg;

1403:   if (dim!=2 && dim!=3) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported");
1404:   ctx = (LandauCtx*)malloc(sizeof(LandauCtx));
1405:   ctx->comm = comm; /* used for diagnostics and global errors */
1406:   /* process options */
1407:   ProcessOptions(ctx,prefix);
1408:   /* Create Mesh */
1409:   LandauDMCreateVMesh(PETSC_COMM_SELF, dim, prefix, ctx, dm);
1410:   prealloc_only = (*dm)->prealloc_only;
1411:   DMViewFromOptions(*dm,NULL,"-dm_landau_pre_dm_view");
1412:   DMSetApplicationContext(*dm, ctx);
1413:   /* create FEM */
1414:   SetupDS(*dm,dim,ctx);
1415:   /* set initial state */
1416:   DMCreateGlobalVector(*dm,X);
1417:   PetscObjectSetName((PetscObject) *X, "u");
1418:   /* initial static refinement, no solve */
1419:   LandauSetInitialCondition(*dm, *X, ctx);
1420:   VecViewFromOptions(*X, NULL, "-dm_landau_pre_vec_view");
1421:   /* forest refinement */
1422:   if (ctx->errorIndicator) {
1423:     /* AMR */
1424:     adapt(dm,ctx,X);
1425:     if ((*dm)->prealloc_only != prealloc_only) SETERRQ(PetscObjectComm((PetscObject)*dm),PETSC_ERR_SUP,"(*dm)->prealloc_only != prealloc_only");
1426:     DMViewFromOptions(*dm,NULL,"-dm_landau_amr_dm_view");
1427:     VecViewFromOptions(*X, NULL, "-dm_landau_amr_vec_view");
1428:   }
1429:   DMSetApplicationContext(*dm, ctx);
1430:   ctx->dmv = *dm;
1431:   if (ctx->dmv->prealloc_only != prealloc_only) SETERRQ(PetscObjectComm((PetscObject)ctx->dmv),PETSC_ERR_SUP,"ctx->dmv->prealloc_only != prealloc_only");
1432:   DMCreateMatrix(ctx->dmv, &ctx->J);
1433:   MatSetOption(ctx->J, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE);
1434:   MatSetOption(ctx->J, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
1435:   if (J) *J = ctx->J;
1436:   /* check for types that we need */
1437: #if defined(PETSC_HAVE_KOKKOS)
1438:   if (ctx->deviceType == LANDAU_CPU) {
1439:     PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,MATAIJKOKKOS,"");
1440:     //if (flg) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"with device=cpu must not use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos");
1441:   }
1442: #elif defined(PETSC_HAVE_CUDA)
1443:   if (ctx->deviceType == LANDAU_CPU) {
1444:     PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,MATAIJCUSPARSE,"");
1445:     //if (flg) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"with device=cpu must not use '-dm_mat_type aijcusparse -dm_vec_type cuda' for GPU assembly and Cuda");
1446:   }
1447: #endif
1448:   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1449:     if (ctx->deviceType == LANDAU_CUDA) {
1450:       PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,MATAIJCUSPARSE,"");
1451:       if (!flg) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijcusparse -dm_vec_type cuda' for GPU assembly and Cuda");
1452:     } else if (ctx->deviceType == LANDAU_KOKKOS) {
1453:       PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,MATAIJKOKKOS,"");
1454: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1455:       if (!flg) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos");
1456: #else
1457:       if (!flg) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"must configure with '--download-kokkos-kernels=1' for GPU assembly and Kokkos");
1458: #endif
1459:     }
1460:   }
1461:   return(0);
1462: }
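
/*
  When gpu_assembly is enabled (the default), the matrix/vector types must
  match the device backend, as the checks above enforce. Illustrative option
  sets (assuming the corresponding PETSc build configuration):

    ./ex1 -dm_landau_device_type cuda   -dm_mat_type aijcusparse -dm_vec_type cuda
    ./ex1 -dm_landau_device_type kokkos -dm_mat_type aijkokkos   -dm_vec_type kokkos
*/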

1464: /*@
1465:  LandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh

1467:  Collective on dm

1469:  Input/Output Parameters:
1470:  .   dm - the dm to destroy

1472:  Level: beginner

1474:  .keywords: mesh
1475:  .seealso: LandauCreateVelocitySpace()
1476:  @*/
1477: PetscErrorCode LandauDestroyVelocitySpace(DM *dm)
1478: {
1479:   PetscErrorCode ierr; PetscInt ii; /* ii is a species index, not an error code */
1480:   LandauCtx      *ctx;
1481:   PetscContainer container = NULL;
1483:   DMGetApplicationContext(*dm, &ctx);
1484:   PetscObjectQuery((PetscObject)ctx->J,"coloring", (PetscObject*)&container);
1485:   if (container) {
1486:     PetscContainerDestroy(&container);
1487:   }
1488:   MatDestroy(&ctx->M);
1489:   MatDestroy(&ctx->J);
1490:   for (ii=0;ii<ctx->num_species;ii++) {
1491:     PetscFEDestroy(&ctx->fe[ii]);
1492:   }
1493:   free(ctx);
1494:   DMDestroy(dm);
1495:   return(0);
1496: }

1498: /* < v, u > : density */
1499: static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1500:                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1501:                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1502:                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1503: {
1504:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
1505:   f0[0] = u[ii];
1506: }

1508: /* < v, x_j u > : j-momentum */
1509: static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1510:                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1511:                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1512:                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1513: {
1514:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]);
1515:   f0[0] = x[jj]*u[ii]; /* x momentum */
1516: }

1518: static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1519:                     const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1520:                     const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1521:                     PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1522: {
1523:   PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]);
1524:   double tmp1 = 0.;
1525:   for (i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
1526:   f0[0] = tmp1*u[ii];
1527: }

1529: /* < v, 2 pi r u > : density (cylindrical coordinates) */
1530: static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1531:                       const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1532:                       const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1533:                       PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1534: {
1535:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
1536:   f0[0] = 2.*PETSC_PI*x[0]*u[ii];
1537: }

1539: /* < v, 2 pi r z u > : z-momentum (cylindrical coordinates) */
1540: static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1541:                       const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1542:                       const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1543:                       PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1544: {
1545:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
1546:   f0[0] = 2.*PETSC_PI*x[0]*x[1]*u[ii];
1547: }

1549: static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1550:                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1551:                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1552:                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1553: {
1554:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
1555:   f0[0] =  2.*PETSC_PI*x[0]*(x[0]*x[0] + x[1]*x[1])*u[ii];
1556: }
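
/*
  The f0_s_* functionals above are the integrands handed to PetscDSSetObjective()
  in LandauPrintNorms() below. For species ii in 3V they evaluate

    density:   n   = \int f_ii dv        (f0_s_den)
    momentum:  p_j = \int x_j f_ii dv    (f0_s_mom, component jj)
    energy:    E  ~  \int |v|^2 f_ii dv  (f0_s_v2, scaled by m/2 and the
                                          normalization constants later),

  while the axisymmetric 2V versions (f0_s_rden, f0_s_rmom, f0_s_rv2) carry the
  cylindrical volume element 2*pi*r dr dz explicitly.
*/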

1558: /*@
1559:  LandauPrintNorms - collects moments and prints them

1561:  Collective on dm

1563:  Input Parameters:
1564:  +   X  - the state
1565:  -   stepi - current step to print

1567:  Level: beginner

1569:  .keywords: mesh
1570:  .seealso: LandauCreateVelocitySpace()
1571:  @*/
1572: PetscErrorCode LandauPrintNorms(Vec X, PetscInt stepi)
1573: {
1575:   LandauCtx      *ctx;
1576:   PetscDS        prob;
1577:   DM             plex,dm;
1578:   PetscInt       cStart, cEnd, dim, ii;
1579:   PetscScalar    xmomentumtot=0, ymomentumtot=0, zmomentumtot=0, energytot=0, densitytot=0, tt[LANDAU_MAX_SPECIES];
1580:   PetscScalar    xmomentum[LANDAU_MAX_SPECIES],  ymomentum[LANDAU_MAX_SPECIES],  zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES];

1583:   VecGetDM(X, &dm);
1584:   if (!dm) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no DM");
1585:   DMGetDimension(dm, &dim);
1586:   DMGetApplicationContext(dm, &ctx);
1587:   if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
1588:   DMConvert(ctx->dmv, DMPLEX, &plex);
1589:   DMCreateDS(plex);
1590:   DMGetDS(plex, &prob);
1591:   /* print momentum and energy */
1592:   for (ii=0;ii<ctx->num_species;ii++) {
1593:     PetscScalar user[2] = { (PetscScalar)ii, (PetscScalar)ctx->charges[ii]};
1594:     PetscDSSetConstants(prob, 2, user);
1595:     if (dim==2) { /* 2V axisymmetric (cylindrical coordinates) */
1596:       PetscDSSetObjective(prob, 0, &f0_s_rden);
1597:       DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1598:       density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
1599:       PetscDSSetObjective(prob, 0, &f0_s_rmom);
1600:       DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1601:       zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
1602:       PetscDSSetObjective(prob, 0, &f0_s_rv2);
1603:       DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1604:       energy[ii] = tt[0]*0.5*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
1605:       zmomentumtot += zmomentum[ii];
1606:       energytot  += energy[ii];
1607:       densitytot += density[ii];
1608:       PetscPrintf(ctx->comm, "%3D) species-%D: charge density= %20.13e z-momentum= %20.13e energy= %20.13e",stepi,ii,PetscRealPart(density[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii]));
1609:     } else { /* full 3V */
1610:       PetscDSSetObjective(prob, 0, &f0_s_den);
1611:       DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1612:       density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
1613:       PetscDSSetObjective(prob, 0, &f0_s_mom);
1614:       user[1] = 0;
1615:       DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1616:       xmomentum[ii]  = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
1617:       user[1] = 1;
1618:       DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1619:       ymomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
1620:       user[1] = 2;
1621:       DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1622:       zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
1623:       PetscDSSetObjective(prob, 0, &f0_s_v2);
1624:       DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1625:       energy[ii]    = 0.5*tt[0]*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
1626:       PetscPrintf(ctx->comm, "%3D) species %D: density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e",
1627:                          stepi,ii,PetscRealPart(density[ii]),PetscRealPart(xmomentum[ii]),PetscRealPart(ymomentum[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii]));
1628:       xmomentumtot += xmomentum[ii];
1629:       ymomentumtot += ymomentum[ii];
1630:       zmomentumtot += zmomentum[ii];
1631:       energytot  += energy[ii];
1632:       densitytot += density[ii];
1633:     }
1634:     if (ctx->num_species>1) PetscPrintf(ctx->comm, "\n");
1635:   }
1636:   /* totals */
1637:   DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);
1638:   DMDestroy(&plex);
1639:   if (ctx->num_species>1) {
1640:     if (dim==2) {
1641:       PetscPrintf(ctx->comm, "\t%3D) Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %D cells)",
1642:                          stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart);
1643:     } else {
1644:       PetscPrintf(ctx->comm, "\t%3D) Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %D cells)",
1645:                          stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(xmomentumtot),(double)PetscRealPart(ymomentumtot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart);
1646:     }
1647:   } else {
1648:     PetscPrintf(ctx->comm, " -- %D cells",cEnd-cStart);
1649:   }
1650:   if (ctx->verbose > 1) {PetscPrintf(ctx->comm,", %D sub (vector) threads\n",ctx->subThreadBlockSize);}
1651:   else {PetscPrintf(ctx->comm,"\n");}
1652:   return(0);
1653: }
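
/*
  LandauPrintNorms() is convenient inside a TS monitor. A minimal sketch (the
  Monitor name is a hypothetical user function; TSMonitorSet() is the standard
  PETSc hook):

    static PetscErrorCode Monitor(TS ts, PetscInt stepi, PetscReal time, Vec X, void *actx)
    {
      PetscErrorCode ierr;
      ierr = LandauPrintNorms(X, stepi);CHKERRQ(ierr);
      return 0;
    }
    ...
    ierr = TSMonitorSet(ts, Monitor, NULL, NULL);CHKERRQ(ierr);
*/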

1655: static PetscErrorCode destroy_coloring (void *is)
1656: {
1657:   ISColoring tmp = (ISColoring)is;
1658:   return ISColoringDestroy(&tmp);
1659: }

1661: /*@
1662:  LandauCreateColoring - create a coloring and add to matrix (Landau context used just for 'print' flag, should be in DMPlex)

1664:  Collective on JacP

1666:  Input Parameters:
1667:  +   JacP  - matrix to add coloring to
1668:  -   plex - The DM

1670:  Output Parameter:
1671:  .   container  - Container with coloring

1673:  Level: beginner

1675:  .keywords: mesh
1676:  .seealso: LandauCreateVelocitySpace()
1677:  @*/
1678: PetscErrorCode LandauCreateColoring(Mat JacP, DM plex, PetscContainer *container)
1679: {
1680:   PetscErrorCode  ierr;
1681:   PetscInt        dim,cell,i,ej,nc,Nv,totDim,numGCells,cStart,cEnd;
1682:   ISColoring      iscoloring = NULL;
1683:   Mat             G,Q;
1684:   PetscScalar     ones[128];
1685:   MatColoring     mc;
1686:   IS             *is;
1687:   PetscInt        csize,colour,j,k;
1688:   const PetscInt *indices;
1689:   PetscInt       numComp[1];
1690:   PetscInt       numDof[4];
1691:   PetscFE        fe;
1692:   DM             colordm;
1693:   PetscSection   csection, section, globalSection;
1694:   PetscDS        prob;
1695:   LandauCtx      *ctx;

1698:   DMGetApplicationContext(plex, &ctx);
1699:   DMGetLocalSection(plex, &section);
1700:   DMGetGlobalSection(plex, &globalSection);
1701:   DMGetDimension(plex, &dim);
1702:   DMGetDS(plex, &prob);
1703:   PetscDSGetTotalDimension(prob, &totDim);
1704:   DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);
1705:   numGCells = cEnd - cStart;
1706:   /* create cell centered DM */
1707:   DMClone(plex, &colordm);
1708:   PetscFECreateDefault(PetscObjectComm((PetscObject) plex), dim, 1, PETSC_FALSE, "color_", PETSC_DECIDE, &fe);
1709:   PetscObjectSetName((PetscObject) fe, "color");
1710:   DMSetField(colordm, 0, NULL, (PetscObject)fe);
1711:   PetscFEDestroy(&fe);
1712:   for (i = 0; i < (dim+1); ++i) numDof[i] = 0;
1713:   numDof[dim] = 1;
1714:   numComp[0] = 1;
1715:   DMPlexCreateSection(colordm, NULL, numComp, numDof, 0, NULL, NULL, NULL, NULL, &csection);
1716:   PetscSectionSetFieldName(csection, 0, "color");
1717:   DMSetLocalSection(colordm, csection);
1718:   DMViewFromOptions(colordm,NULL,"-color_dm_view");
1719:   /* get element-to-dof incidence matrix Q and coloring graph G = Q Q^T */
1720:   MatGetSize(JacP,NULL,&Nv);
1721:   MatCreateAIJ(PETSC_COMM_SELF,PETSC_DECIDE,PETSC_DECIDE,numGCells,Nv,totDim,NULL,0,NULL,&Q);
1722:   for (i=0;i<128;i++) ones[i] = 1.0;
1723:   for (cell = cStart, ej = 0 ; cell < cEnd; ++cell, ++ej) {
1724:     PetscInt numindices,*indices;
1725:     DMPlexGetClosureIndices(plex, section, globalSection, cell, PETSC_TRUE, &numindices, &indices, NULL, NULL);
1726:     if (numindices>128) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "too many indices. %D > %D",numindices,128);
1727:     MatSetValues(Q,1,&ej,numindices,indices,ones,ADD_VALUES);
1728:     DMPlexRestoreClosureIndices(plex, section, globalSection, cell, PETSC_TRUE, &numindices, &indices, NULL, NULL);
1729:   }
1730:   MatAssemblyBegin(Q, MAT_FINAL_ASSEMBLY);
1731:   MatAssemblyEnd(Q, MAT_FINAL_ASSEMBLY);
1732:   MatMatTransposeMult(Q,Q,MAT_INITIAL_MATRIX,4.0,&G);
1733:   PetscObjectSetName((PetscObject) Q, "Q");
1734:   PetscObjectSetName((PetscObject) G, "coloring graph");
1735:   MatViewFromOptions(G,NULL,"-coloring_mat_view");
1736:   MatViewFromOptions(Q,NULL,"-coloring_mat_view");
1737:   MatDestroy(&Q);
1738:   /* coloring */
1739:   MatColoringCreate(G,&mc);
1740:   MatColoringSetDistance(mc,1);
1741:   MatColoringSetType(mc,MATCOLORINGJP);
1742:   MatColoringSetFromOptions(mc);
1743:   MatColoringApply(mc,&iscoloring);
1744:   MatColoringDestroy(&mc);
1745:   /* view */
1746:   ISColoringViewFromOptions(iscoloring,NULL,"-coloring_is_view");
1747:   ISColoringGetIS(iscoloring,PETSC_USE_POINTER,&nc,&is);
1748:   if (ctx && ctx->verbose > 2) {
1749:     PetscViewer    viewer;
1750:     Vec            color_vec, eidx_vec;
1751:     DMGetGlobalVector(colordm, &color_vec);
1752:     DMGetGlobalVector(colordm, &eidx_vec);
1753:     for (colour=0; colour<nc; colour++) {
1754:       ISGetLocalSize(is[colour],&csize);
1755:       ISGetIndices(is[colour],&indices);
1756:       for (j=0; j<csize; j++) {
1757:         PetscScalar v = (PetscScalar)colour;
1758:         k = indices[j];
1759:         VecSetValues(color_vec,1,&k,&v,INSERT_VALUES);
1760:         v = (PetscScalar)k;
1761:         VecSetValues(eidx_vec,1,&k,&v,INSERT_VALUES);
1762:       }
1763:       ISRestoreIndices(is[colour],&indices);
1764:     }
1765:     /* view */
1766:     PetscViewerVTKOpen(ctx->comm, "color.vtu", FILE_MODE_WRITE, &viewer);
1767:     PetscObjectSetName((PetscObject) color_vec, "color");
1768:     VecView(color_vec, viewer);
1769:     PetscViewerDestroy(&viewer);
1770:     PetscViewerVTKOpen(ctx->comm, "eidx.vtu", FILE_MODE_WRITE, &viewer);
1771:     PetscObjectSetName((PetscObject) eidx_vec, "element-idx");
1772:     VecView(eidx_vec, viewer);
1773:     PetscViewerDestroy(&viewer);
1774:     DMRestoreGlobalVector(colordm, &color_vec);
1775:     DMRestoreGlobalVector(colordm, &eidx_vec);
1776:   }
1777:   PetscSectionDestroy(&csection);
1778:   DMDestroy(&colordm);
1779:   ISColoringRestoreIS(iscoloring,PETSC_USE_POINTER,&is);
1780:   MatDestroy(&G);
1781:   /* stash coloring */
1782:   PetscContainerCreate(PETSC_COMM_SELF, container);
1783:   PetscContainerSetPointer(*container,(void*)iscoloring);
1784:   PetscContainerSetUserDestroy(*container, destroy_coloring);
1785:   PetscObjectCompose((PetscObject)JacP,"coloring",(PetscObject)*container);
1786:   if (ctx && ctx->verbose > 0) {
1787:     PetscPrintf(ctx->comm, "Made coloring with %D colors\n", nc);
1788:   }
1789:   return(0);
1790: }
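
/*
  Why G = Q Q^T is the right conflict graph: row e of Q has a nonzero in every
  column corresponding to a global dof in the closure of element e, so
  (Q Q^T)(e1,e2) is nonzero exactly when elements e1 and e2 share at least one
  dof. A distance-1 coloring of G therefore splits the elements into groups
  whose element matrices touch disjoint rows/columns of the Jacobian, which is
  what lets LandauAssembleOpenMP() below add each color's contributions
  concurrently.
*/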

1792: PetscErrorCode LandauAssembleOpenMP(PetscInt cStart, PetscInt cEnd, PetscInt totDim, DM plex, PetscSection section, PetscSection globalSection, Mat JacP, PetscScalar elemMats[], PetscContainer container)
1793: {
1794:   PetscErrorCode  ierr;
1795:   IS             *is;
1796:   PetscInt        nc,colour,j;
1797:   const PetscInt *clr_idxs;
1798:   ISColoring      iscoloring;
1800:   PetscContainerGetPointer(container,(void**)&iscoloring);
1801:   ISColoringGetIS(iscoloring,PETSC_USE_POINTER,&nc,&is);
1802:   for (colour=0; colour<nc; colour++) {
1803:     PetscInt    *idx_arr[1024]; /* need to make dynamic for general use */
1804:     PetscScalar *new_el_mats[1024];
1805:     PetscInt     idx_size[1024],csize;
1806:     ISGetLocalSize(is[colour],&csize);
1807:     if (csize>1024) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "too many elements in color. %D > %D",csize,1024);
1808:     ISGetIndices(is[colour],&clr_idxs);
1809:     /* get indices and mats */
1810:     for (j=0; j<csize; j++) {
1811:       PetscInt    cell = cStart + clr_idxs[j];
1812:       PetscInt    numindices,*indices;
1813:       PetscScalar *elMat = &elemMats[clr_idxs[j]*totDim*totDim];
1814:       PetscScalar *valuesOrig = elMat;
1815:       DMPlexGetClosureIndices(plex, section, globalSection, cell, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);
1816:       idx_size[j] = numindices;
1817:       PetscMalloc2(numindices,&idx_arr[j],numindices*numindices,&new_el_mats[j]);
1818:       PetscMemcpy(idx_arr[j],indices,numindices*sizeof(PetscInt));
1819:       PetscMemcpy(new_el_mats[j],elMat,numindices*numindices*sizeof(PetscScalar));
1820:       DMPlexRestoreClosureIndices(plex, section, globalSection, cell, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);
1821:       if (elMat != valuesOrig) {DMRestoreWorkArray(plex, numindices*numindices, MPIU_SCALAR, &elMat);}
1822:     }
1823:     /* assemble matrix - pragmas break CI ? */
1824:     //#pragma omp parallel default(JacP,idx_size,idx_arr,new_el_mats,colour,clr_idxs)  private(j)
1825:     //#pragma omp parallel for private(j)
1826:     for (j=0; j<csize; j++) {
1827:       PetscInt    numindices = idx_size[j], *indices = idx_arr[j];
1828:       PetscScalar *elMat = new_el_mats[j];
1829:       MatSetValues(JacP,numindices,indices,numindices,indices,elMat,ADD_VALUES);
1830:     }
1831:     /* free */
1832:     ISRestoreIndices(is[colour],&clr_idxs);
1833:     for (j=0; j<csize; j++) {
1834:       PetscFree2(idx_arr[j],new_el_mats[j]);
1835:     }
1836:   }
1837:   ISColoringRestoreIS(iscoloring,PETSC_USE_POINTER,&is);
1838:   return(0);
1839: }

1841: /* < v, u > */
1842: static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1843:                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1844:                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1845:                  PetscReal t, PetscReal u_tShift, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1846: {
1847:   g0[0] = 1.;
1848: }

1850: /* < v, 2 pi r u > */
1851: static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1852:                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1853:                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1854:                  PetscReal t, PetscReal u_tShift, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1855: {
1856:   g0[0] = 2.*PETSC_PI*x[0];
1857: }
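
/*
  These g0 kernels define the mass bilinear form assembled in
  LandauCreateMassMatrix() below: M_ij = \int phi_i phi_j dv in 3V (g0_1), and
  M_ij = \int 2 pi r phi_i phi_j dr dz in axisymmetric 2V (g0_r).
*/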

1859: /*@
1860:  LandauCreateMassMatrix - Create mass matrix for Landau

1862:  Collective on dm

1864:  Input Parameters:
1865:  . dm     - the DM object

1867:  Output Parameters:
1868:  . Amat - The mass matrix (optional); the mass matrix is also stored in the Landau context

1870:  Level: beginner

1872:  .keywords: mesh
1873:  .seealso: LandauCreateVelocitySpace()
1874:  @*/
1875: PetscErrorCode LandauCreateMassMatrix(DM dm, Mat *Amat)
1876: {
1877:   DM             massDM;
1878:   PetscDS        prob;
1879:   PetscInt       ii,dim,N1=1,N2;
1881:   LandauCtx      *ctx;
1882:   Mat            M;

1887:   DMGetApplicationContext(dm, &ctx);
1888:   if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
1889:   DMGetDimension(dm, &dim);
1890:   DMClone(dm, &massDM);
1891:   DMCopyFields(dm, massDM);
1892:   DMCreateDS(massDM);
1893:   DMGetDS(massDM, &prob);
1894:   for (ii=0;ii<ctx->num_species;ii++) {
1895:     if (dim==3) {PetscDSSetJacobian(prob, ii, ii, g0_1, NULL, NULL, NULL);}
1896:     else        {PetscDSSetJacobian(prob, ii, ii, g0_r, NULL, NULL, NULL);}
1897:   }
1898:   DMViewFromOptions(massDM,NULL,"-dm_landau_mass_dm_view");
1899:   DMCreateMatrix(massDM, &M);
1900:   MatSetOption(M, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE);
1901:   {
1902:     Vec locX;
1903:     DM  plex;
1904:     DMConvert(massDM, DMPLEX, &plex);
1905:     DMGetLocalVector(massDM, &locX);
1906:     /* Mass matrix is independent of the input, so no need to fill locX */
1907:     if (plex->prealloc_only != dm->prealloc_only) SETERRQ2(PetscObjectComm((PetscObject) dm), PETSC_ERR_PLIB, "plex->prealloc_only %D != dm->prealloc_only %D",plex->prealloc_only,dm->prealloc_only);
1908:     DMPlexSNESComputeJacobianFEM(plex, locX, M, M, ctx);
1909:     DMRestoreLocalVector(massDM, &locX);
1910:     DMDestroy(&plex);
1911:   }
1912:   DMDestroy(&massDM);
1913:   MatGetSize(ctx->J, &N1, NULL);
1914:   MatGetSize(M, &N2, NULL);
1915:   if (N1 != N2) SETERRQ2(PetscObjectComm((PetscObject) dm), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %D, |Mass|=%D",N1,N2);
1916:   PetscObjectSetName((PetscObject)M, "mass");
1917:   MatViewFromOptions(M,NULL,"-dm_landau_mass_mat_view");
1918:   ctx->M = M; /* this could be a noop, a = a */
1919:   if (Amat) *Amat = M;
1920:   return(0);
1921: }

1923: /*@
1924:  LandauIFunction - TS residual calculation

1926:  Collective on ts

1928:  Input Parameters:
1929:  +   ts  - The time stepping context
1930:  .   time_dummy - current time (not used)
1931:  .   X - Current state
1932:  .   X_t - Time derivative of current state
1933:  -   actx - Landau context

1935:  Output Parameter:
1936:  .   F  - The residual

1938:  Level: beginner

1940:  .keywords: mesh
1941:  .seealso: LandauCreateVelocitySpace(), LandauIJacobian()
1942:  @*/
1943: PetscErrorCode LandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx)
1944: {
1946:   LandauCtx      *ctx=(LandauCtx*)actx;
1947:   PetscInt       dim;
1948:   DM             dm;

1951:   TSGetDM(ts,&dm);
1952:   DMGetApplicationContext(dm, &ctx);
1953:   if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
1954:   PetscLogEventBegin(ctx->events[0],0,0,0,0);
1955:   DMGetDimension(ctx->dmv, &dim);
1956:   PetscInfo3(ts, "Create Landau Jacobian t=%g X'=%p %s\n",time_dummy,X_t,ctx->aux_bool ? " -- seems to be in line search" : "");
1957:   LandauFormJacobian_Internal(X,ctx->J,dim,0.0,(void*)ctx);
1958:   ctx->aux_bool = PETSC_TRUE;
1959:   MatViewFromOptions(ctx->J,NULL,"-landau_jacobian_mat_view");
1960:   /* mat vec for op */
1961:   MatMult(ctx->J,X,F); /* C*f */
1962:   /* add time term */
1963:   if (X_t) {
1964:     MatMultAdd(ctx->M,X_t,F,F);
1965:   }
1966:   PetscLogEventEnd(ctx->events[0],0,0,0,0);
1967:   return(0);
1968: }
1969: static PetscErrorCode MatrixNfDestroy(void *ptr)
1970: {
1971:   PetscInt *nf = (PetscInt *)ptr;
1972:   PetscErrorCode  ierr;
1974:   PetscFree(nf);
1975:   return(0);
1976: }
1977: /*@
1978:  LandauIJacobian - TS Jacobian construction

1980:  Collective on ts

1982:  Input Parameters:
1983:  +   ts  - The time stepping context
1984:  .   time_dummy - current time (not used)
1985:  .   X - Current state
1986:  .   U_tdummy - Time derivative of current state (not used)
1987:  .   shift - shift for du/dt term
1988:  -   actx - Landau context

1990:  Output Parameters:
1991:  +   Amat  - Jacobian
1992:  -   Pmat  - same as Amat

1994:  Level: beginner

1996:  .keywords: mesh
1997:  .seealso: LandauCreateVelocitySpace(), LandauIFunction()
1998:  @*/
1999: PetscErrorCode LandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx)
2000: {
2002:   LandauCtx      *ctx=(LandauCtx*)actx;
2003:   PetscInt       dim;
2004:   DM             dm;
2005:   PetscContainer container;
2007:   TSGetDM(ts,&dm);
2008:   DMGetApplicationContext(dm, &ctx);
2009:   if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2010:   if (Amat!=Pmat || Amat!=ctx->J) SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J");
2011:   DMGetDimension(ctx->dmv, &dim);
2012:   /* get collision Jacobian into A */
2013:   PetscLogEventBegin(ctx->events[9],0,0,0,0);
2014:   PetscInfo2(ts, "Adding mass to Jacobian t=%g, shift=%g\n",(double)time_dummy,(double)shift);
2015:   if (shift==0.0) SETERRQ(ctx->comm, PETSC_ERR_PLIB, "zero shift");
2016:   if (!ctx->aux_bool) SETERRQ(ctx->comm, PETSC_ERR_PLIB, "wrong state");
2017:   LandauFormJacobian_Internal(X,ctx->J,dim,shift,(void*)ctx);
2018:   ctx->aux_bool = PETSC_FALSE;
2019:   MatViewFromOptions(Pmat,NULL,"-landau_mat_view");
2020:   PetscLogEventEnd(ctx->events[9],0,0,0,0);
2021:   /* set number species in Jacobian */
2022:   PetscObjectQuery((PetscObject) ctx->J, "Nf", (PetscObject *) &container);
2023:   if (!container) {
2024:     PetscInt *pNf;
2025:     PetscContainerCreate(PETSC_COMM_SELF, &container);
2026:     PetscMalloc(sizeof(PetscInt), &pNf);
2027:     *pNf = ctx->num_species + 1000*ctx->numConcurrency;
2028:     PetscContainerSetPointer(container, (void *)pNf);
2029:     PetscContainerSetUserDestroy(container, MatrixNfDestroy);
2030:     PetscObjectCompose((PetscObject)ctx->J, "Nf", (PetscObject) container);
2031:     PetscContainerDestroy(&container);
2032:   }

2034:   return(0);
2035: }
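
/*
  End-to-end usage sketch tying the public entry points together (a minimal
  illustration, not a complete driver; all TS* calls are standard PETSc API):

    DM  dm;
    Vec X;
    Mat J;
    TS  ts;
    PetscErrorCode ierr;
    ierr = LandauCreateVelocitySpace(PETSC_COMM_SELF, 2, "", &X, &J, &dm);CHKERRQ(ierr);
    ierr = LandauCreateMassMatrix(dm, NULL);CHKERRQ(ierr);
    ierr = TSCreate(PETSC_COMM_SELF, &ts);CHKERRQ(ierr);
    ierr = TSSetDM(ts, dm);CHKERRQ(ierr);
    ierr = TSSetIFunction(ts, NULL, LandauIFunction, NULL);CHKERRQ(ierr);
    ierr = TSSetIJacobian(ts, J, J, LandauIJacobian, NULL);CHKERRQ(ierr);
    ierr = TSSetFromOptions(ts);CHKERRQ(ierr);
    ierr = TSSolve(ts, X);CHKERRQ(ierr);
    ierr = VecDestroy(&X);CHKERRQ(ierr);
    ierr = TSDestroy(&ts);CHKERRQ(ierr);
    ierr = LandauDestroyVelocitySpace(&dm);CHKERRQ(ierr);

  Note that J is owned by the Landau context ("object destroys" in the man page
  above), so the caller destroys X but not J.
*/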