Actual source code: plexland.c
1: #include <../src/mat/impls/aij/seq/aij.h>
2: #include <petsc/private/dmpleximpl.h>
3: #include <petsclandau.h>
4: #include <petscts.h>
5: #include <petscdmforest.h>
6: #include <petscdmcomposite.h>
8: /* Landau collision operator */
10: /* relativistic terms */
11: #if defined(PETSC_USE_REAL_SINGLE)
12: #define SPEED_OF_LIGHT 2.99792458e8F
13: #define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */
14: #else
15: #define SPEED_OF_LIGHT 2.99792458e8
16: #define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */
17: #endif
19: #define PETSC_THREAD_SYNC
20: #include "land_tensors.h"
22: #if defined(PETSC_HAVE_OPENMP)
23: #include <omp.h>
24: #endif
26: /* vector padding not supported */
27: #define LANDAU_VL 1
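/*
  Descriptive note (editorial): the three wrappers below sit in front of the native SeqAIJ
  MatMult/MatMultTranspose/MatGetDiagonal operations, which are cached in ctx->seqaij_*.
  When a "LandauCtx" container is attached to the matrix, the input vector is scattered into
  batch ordering through ctx->plex_batch, the cached SeqAIJ kernel is applied, and the result
  is scattered back (see ctx->jacobian_field_major_order). Without the container they fall
  through to the plain MatMult/MatMultTranspose/MatGetDiagonal.
*/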
29: static PetscErrorCode LandauMatMult(Mat A, Vec x, Vec y)
30: {
31: LandauCtx *ctx;
32: PetscContainer container;
34: PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container);
35: if (container) {
36: PetscContainerGetPointer(container, (void **) &ctx);
37: VecScatterBegin(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);
38: VecScatterEnd(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);
39: (*ctx->seqaij_mult)(A,ctx->work_vec,y);
40: VecCopy(y, ctx->work_vec);
41: VecScatterBegin(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE);
42: VecScatterEnd(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE);
43: return 0;
44: }
45: MatMult(A,x,y);
46: return 0;
47: }
49: // Computes v3 = v2 + A * v1 (guarded off with the error below; not expected to be called).
50: static PetscErrorCode LandauMatMultAdd(Mat A,Vec v1,Vec v2,Vec v3)
51: {
52: SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "?????");
53: LandauMatMult(A,v1,v3);
54: VecAYPX(v3,1,v2);
55: return 0;
56: }
58: static PetscErrorCode LandauMatMultTranspose(Mat A, Vec x, Vec y)
59: {
60: LandauCtx *ctx;
61: PetscContainer container;
63: PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container);
64: if (container) {
65: PetscContainerGetPointer(container, (void **) &ctx);
66: VecScatterBegin(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);
67: VecScatterEnd(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);
68: (*ctx->seqaij_multtranspose)(A,ctx->work_vec,y);
69: VecCopy(y, ctx->work_vec);
70: VecScatterBegin(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE);
71: VecScatterEnd(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE);
72: return 0;
73: }
74: MatMultTranspose(A,x,y);
75: return 0;
76: }
78: static PetscErrorCode LandauMatGetDiagonal(Mat A,Vec x)
79: {
80: LandauCtx *ctx;
81: PetscContainer container;
83: PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container);
84: if (container) {
85: PetscContainerGetPointer(container, (void **) &ctx);
86: (*ctx->seqaij_getdiagonal)(A,ctx->work_vec);
87: VecScatterBegin(ctx->plex_batch,ctx->work_vec,x,INSERT_VALUES,SCATTER_REVERSE);
88: VecScatterEnd(ctx->plex_batch,ctx->work_vec,x,INSERT_VALUES,SCATTER_REVERSE);
89: return 0;
90: }
91: MatGetDiagonal(A, x);
92: return 0;
93: }
95: static PetscErrorCode LandauGPUMapsDestroy(void *ptr)
96: {
97: P4estVertexMaps *maps = (P4estVertexMaps*)ptr;
98: // free device data
99: if (maps[0].deviceType != LANDAU_CPU) {
100: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
101: if (maps[0].deviceType == LANDAU_KOKKOS) {
102: LandauKokkosDestroyMatMaps(maps, maps[0].numgrids); // implies Kokkos does the cleanup
103: } // else could be CUDA
104: #elif defined(PETSC_HAVE_CUDA)
105: if (maps[0].deviceType == LANDAU_CUDA) {
106: LandauCUDADestroyMatMaps(maps, maps[0].numgrids);
107: } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps->deviceType %" PetscInt_FMT " ?????",maps->deviceType);
108: #endif
109: }
110: // free host data
111: for (PetscInt grid=0 ; grid < maps[0].numgrids ; grid++) {
112: PetscFree(maps[grid].c_maps);
113: PetscFree(maps[grid].gIdx);
114: }
115: PetscFree(maps);
117: return 0;
118: }
119: static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
120: {
121: PetscReal v2 = 0;
122: /* compute v^2 */
123: for (int i = 0; i < dim; ++i) v2 += x[i]*x[i];
125: /* return the kinetic energy v^2/2 */
126: u[0] = v2/2;
126: return 0;
127: }
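/*
  Editorial note: gamma_m1_f evaluates (up to a constant offset) the relativistic factor
  gamma - 1 = sqrt(1 + u^2/c_0^2) - 1. Only its gradient is used, and the exact identity
  xx/(sqrt(1+xx)+1) = sqrt(1+xx) - 1 gives the better-conditioned form used in optimized builds.
*/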
129: /* needs double */
130: static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
131: {
132: PetscReal *c2_0_arr = ((PetscReal*)actx);
133: double u2 = 0, c02 = (double)*c2_0_arr, xx;
135: /* compute u^2 */
136: for (int i = 0; i < dim; ++i) u2 += x[i]*x[i];
137: /* gamma - 1 = g_eps; written this way for conditioning, and only derivatives of this field are used */
138: xx = u2/c02;
139: #if defined(PETSC_USE_DEBUG)
140: u[0] = PetscSqrtReal(1. + xx);
141: #else
142: u[0] = xx/(PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned. -1 might help condition and only used for derivative
143: #endif
144: return 0;
145: }
147: /*
148: LandauFormJacobian_Internal - Evaluates Jacobian matrix.
150: Input Parameters:
151: . a_X - input vector (state)
152: . actx - optional user-defined context
153: . dim - dimension
154: . shift - multiple of the mass matrix to assemble (0 means assemble the Landau Jacobian)
155: Output Parameter:
156: . JacP - Jacobian matrix to fill (not created here)
157: */
158: static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx)
159: {
160: LandauCtx *ctx = (LandauCtx*)a_ctx;
161: PetscInt numCells[LANDAU_MAX_GRIDS],Nq,Nb;
162: PetscQuadrature quad;
163: PetscReal Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2)
164: PetscScalar *cellClosure=NULL;
165: const PetscScalar *xdata=NULL;
166: PetscDS prob;
167: PetscContainer container;
168: P4estVertexMaps *maps;
169: Mat subJ[LANDAU_MAX_GRIDS*LANDAU_MAX_BATCH_SZ];
174: /* check for matrix container for GPU assembly. Support CPU assembly for debugging */
176: PetscLogEventBegin(ctx->events[10],0,0,0,0);
177: DMGetDS(ctx->plex[0], &prob); // same DS for all grids
178: PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container);
179: if (container) {
181: PetscContainerGetPointer(container, (void **) &maps);
183: for (PetscInt i=0;i<ctx->num_grids*ctx->batch_sz;i++) subJ[i] = NULL;
184: } else {
186: for (PetscInt tid=0 ; tid<ctx->batch_sz ; tid++) {
187: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
188: DMCreateMatrix(ctx->plex[grid], &subJ[ LAND_PACK_IDX(tid,grid) ]);
189: }
190: }
191: maps = NULL;
192: }
193: // get dynamic data (Eq is odd, for quench and Spitzer test) for CPU assembly and raw data for Jacobian GPU assembly. Get host numCells[], Nq (yuck)
194: PetscFEGetQuadrature(ctx->fe[0], &quad);
195: PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL); Nb = Nq;
197: // get metadata for collecting dynamic data
198: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
199: PetscInt cStart, cEnd;
201: DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);
202: numCells[grid] = cEnd - cStart; // grids can have different topology
203: }
204: PetscLogEventEnd(ctx->events[10],0,0,0,0);
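/*
  Editorial note: shift == 0 means assemble the Landau (collision) Jacobian, which needs the
  current state gathered below as cellClosure (CPU assembly) or raw xdata (GPU assembly);
  shift != 0 means assemble shift times the mass matrix, which needs no state data.
*/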
205: if (shift==0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */
206: DM pack;
207: VecGetDM(a_X, &pack);
209: PetscLogEventBegin(ctx->events[1],0,0,0,0);
210: MatZeroEntries(JacP);
211: for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) {
212: Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
213: if (dim==2) Eq_m[fieldA] *= 2 * PETSC_PI; /* add the 2pi term that is not in Landau */
214: }
215: if (!ctx->gpu_assembly) {
216: Vec *locXArray,*globXArray;
217: PetscScalar *cellClosure_it;
218: PetscInt cellClosure_sz=0,nDMs,Nf[LANDAU_MAX_GRIDS];
219: PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
220: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
221: DMGetLocalSection(ctx->plex[grid], &section[grid]);
222: DMGetGlobalSection(ctx->plex[grid], &globsection[grid]);
223: PetscSectionGetNumFields(section[grid], &Nf[grid]);
224: }
225: /* count cellClosure size */
226: DMCompositeGetNumberDM(pack,&nDMs);
227: for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) cellClosure_sz += Nb*Nf[grid]*numCells[grid];
228: PetscMalloc1(cellClosure_sz*ctx->batch_sz,&cellClosure);
229: cellClosure_it = cellClosure;
230: PetscMalloc(sizeof(*locXArray)*nDMs, &locXArray);
231: PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray);
232: DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray);
233: DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray);
234: for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP (once)
235: for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
236: Vec locX = locXArray[ LAND_PACK_IDX(b_id,grid) ], globX = globXArray[ LAND_PACK_IDX(b_id,grid) ], locX2;
237: PetscInt cStart, cEnd, ei;
238: VecDuplicate(locX,&locX2);
239: DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2);
240: DMGlobalToLocalEnd (ctx->plex[grid], globX, INSERT_VALUES, locX2);
241: DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);
242: for (ei = cStart ; ei < cEnd; ++ei) {
243: PetscScalar *coef = NULL;
244: DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef);
245: PetscMemcpy(cellClosure_it,coef,Nb*Nf[grid]*sizeof(*cellClosure_it)); /* change if LandauIPReal != PetscScalar */
246: DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef);
247: cellClosure_it += Nb*Nf[grid];
248: }
249: VecDestroy(&locX2);
250: }
251: }
253: DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray);
254: DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray);
255: PetscFree(locXArray);
256: PetscFree(globXArray);
257: xdata = NULL;
258: } else {
259: PetscMemType mtype;
260: if (ctx->jacobian_field_major_order) { // get data in batch ordering
261: VecScatterBegin(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);
262: VecScatterEnd(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);
263: VecGetArrayReadAndMemType(ctx->work_vec,&xdata,&mtype);
264: } else {
265: VecGetArrayReadAndMemType(a_X,&xdata,&mtype);
266: }
267: if (mtype!=PETSC_MEMTYPE_HOST && ctx->deviceType == LANDAU_CPU) {
268: SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"CPU run with device data: use -mat_type aij");
269: }
270: cellClosure = NULL;
271: }
272: PetscLogEventEnd(ctx->events[1],0,0,0,0);
273: } else xdata = cellClosure = NULL;
275: /* do it */
276: if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
277: if (ctx->deviceType == LANDAU_CUDA) {
278: #if defined(PETSC_HAVE_CUDA)
279: LandauCUDAJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ, JacP);
280: #else
281: SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
282: #endif
283: } else if (ctx->deviceType == LANDAU_KOKKOS) {
284: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
285: LandauKokkosJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ,JacP);
286: #else
287: SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
288: #endif
289: }
290: } else { /* CPU version */
291: PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species
292: PetscInt ip_offset[LANDAU_MAX_GRIDS+1], ipf_offset[LANDAU_MAX_GRIDS+1], elem_offset[LANDAU_MAX_GRIDS+1],IPf_sz_glb,IPf_sz_tot,num_grids=ctx->num_grids,Nf[LANDAU_MAX_GRIDS];
293: PetscReal *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w;
294: PetscReal Eq_m[LANDAU_MAX_SPECIES], invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
295: PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
296: PetscScalar *coo_vals=NULL;
297: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
298: DMGetLocalSection(ctx->plex[grid], &section[grid]);
299: DMGetGlobalSection(ctx->plex[grid], &globsection[grid]);
300: PetscSectionGetNumFields(section[grid], &Nf[grid]);
301: }
302: /* count IPf size, etc */
303: PetscDSGetTabulation(prob, &Tf); // Bf, &Df same for all grids
304: const PetscReal *const BB = Tf[0]->T[0], * const DD = Tf[0]->T[1];
305: ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0;
306: for (PetscInt grid=0 ; grid<num_grids ; grid++) {
307: PetscInt nfloc = ctx->species_offset[grid+1] - ctx->species_offset[grid];
308: elem_offset[grid+1] = elem_offset[grid] + numCells[grid];
309: ip_offset[grid+1] = ip_offset[grid] + numCells[grid]*Nq;
310: ipf_offset[grid+1] = ipf_offset[grid] + Nq*nfloc*numCells[grid];
311: }
312: IPf_sz_glb = ipf_offset[num_grids];
313: IPf_sz_tot = IPf_sz_glb*ctx->batch_sz;
314: // prep COO
315: if (ctx->coo_assembly) {
316: PetscMalloc1(ctx->SData_d.coo_size,&coo_vals); // allocate every time?
317: PetscInfo(ctx->plex[0], "COO Allocate %" PetscInt_FMT " values\n",ctx->SData_d.coo_size);
318: }
319: if (shift==0.0) { /* compute dynamic data f and df and init data for Jacobian */
320: #if defined(PETSC_HAVE_THREADSAFETY)
321: double starttime, endtime;
322: starttime = MPI_Wtime();
323: #endif
324: PetscLogEventBegin(ctx->events[8],0,0,0,0);
325: for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) {
326: invMass[fieldA] = ctx->m_0/ctx->masses[fieldA];
327: Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
328: if (dim==2) Eq_m[fieldA] *= 2 * PETSC_PI; /* add the 2pi term that is not in Landau */
329: nu_alpha[fieldA] = PetscSqr(ctx->charges[fieldA]/ctx->m_0)*ctx->m_0/ctx->masses[fieldA];
330: nu_beta[fieldA] = PetscSqr(ctx->charges[fieldA]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
331: }
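/*
  Editorial note on the coefficient split: the beta (field-species) sums in the kernel below are
  weighted by nu_beta[b] (and invMass[b] for the gradient term) and the alpha (test-species)
  scaling by nu_alpha[a] is applied later, so from the definitions above the pair prefactor is
    nu_alpha[a]*nu_beta[b] = q_a^2 q_b^2 lnLam t_0 n_0 / (8 pi eps0^2 m_0 m_a v_0^3).
*/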
332: PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, dim==3 ? IPf_sz_tot : 0, &dudz);
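/*
  Editorial note: ff, dudx, dudy (and dudz in 3D) hold f and its velocity-space gradient at every
  quadrature point, flattened as
    idx = b_id*IPf_sz_glb + ipf_offset[grid] + f*numCells[grid]*Nq + loc_elem*Nq + qi,
  i.e. contiguous per (batch member, grid, species).
*/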
333: // F df/dx
334: for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element
335: const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem; // b_id == OMP thd_id in batch
336: // find my grid:
337: PetscInt grid = 0;
338: while (b_elem_idx >= elem_offset[grid+1]) grid++; // yuck search for grid
339: {
340: const PetscInt loc_nip = numCells[grid]*Nq, loc_Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid];
341: const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); //b_id*b_N + ctx->mat_offset[grid];
342: PetscScalar *coef, coef_buff[LANDAU_MAX_SPECIES*LANDAU_MAX_NQ];
343: PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim]; // invJ is static data on batch 0
344: PetscInt b,f,q;
345: if (cellClosure) {
346: coef = &cellClosure[b_id*IPf_sz_glb + ipf_offset[grid] + loc_elem*Nb*loc_Nf]; // this is const
347: } else {
348: coef = coef_buff;
349: for (f = 0; f < loc_Nf; ++f) {
350: LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0];
351: for (b = 0; b < Nb; ++b) {
352: PetscInt idx = Idxs[b];
353: if (idx >= 0) {
354: coef[f*Nb+b] = xdata[idx+moffset];
355: } else {
356: idx = -idx - 1;
357: coef[f*Nb+b] = 0;
358: for (q = 0; q < maps[grid].num_face; q++) {
359: PetscInt id = maps[grid].c_maps[idx][q].gid;
360: PetscScalar scale = maps[grid].c_maps[idx][q].scale;
361: coef[f*Nb+b] += scale*xdata[id+moffset];
362: }
363: }
364: }
365: }
366: }
367: /* get f and df */
368: for (PetscInt qi = 0; qi < Nq; qi++) {
369: const PetscReal *invJ = &invJe[qi*dim*dim];
370: const PetscReal *Bq = &BB[qi*Nb];
371: const PetscReal *Dq = &DD[qi*Nb*dim];
372: PetscReal u_x[LANDAU_DIM];
373: /* get f & df */
374: for (f = 0; f < loc_Nf; ++f) {
375: const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid] + f*loc_nip + loc_elem*Nq + qi;
376: PetscInt b, e;
377: PetscReal refSpaceDer[LANDAU_DIM];
378: ff[idx] = 0.0;
379: for (int d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
380: for (b = 0; b < Nb; ++b) {
381: const PetscInt cidx = b;
382: ff[idx] += Bq[cidx]*PetscRealPart(coef[f*Nb+cidx]);
383: for (int d = 0; d < dim; ++d) {
384: refSpaceDer[d] += Dq[cidx*dim+d]*PetscRealPart(coef[f*Nb+cidx]);
385: }
386: }
387: for (int d = 0; d < LANDAU_DIM; ++d) {
388: for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) {
389: u_x[d] += invJ[e*dim+d]*refSpaceDer[e];
390: }
391: }
392: dudx[idx] = u_x[0];
393: dudy[idx] = u_x[1];
394: #if LANDAU_DIM==3
395: dudz[idx] = u_x[2];
396: #endif
397: }
398: } // q
399: } // grid
400: } // grid*batch
401: PetscLogEventEnd(ctx->events[8],0,0,0,0);
402: #if defined(PETSC_HAVE_THREADSAFETY)
403: endtime = MPI_Wtime();
404: if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime);
405: #endif
406: } // Jacobian setup
407: // assemble Jacobian (or mass)
408: for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element
409: const PetscInt b_Nelem = elem_offset[num_grids];
410: const PetscInt glb_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem;
411: PetscInt grid = 0;
412: #if defined(PETSC_HAVE_THREADSAFETY)
413: double starttime, endtime;
414: starttime = MPI_Wtime();
415: #endif
416: while (glb_elem_idx >= elem_offset[grid+1]) grid++;
417: {
418: const PetscInt loc_Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = glb_elem_idx - elem_offset[grid];
419: const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset), totDim = loc_Nf*Nq, elemMatSize = totDim*totDim;
420: PetscScalar *elemMat;
421: const PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim];
422: PetscMalloc1(elemMatSize, &elemMat);
423: PetscMemzero(elemMat, elemMatSize*sizeof(*elemMat));
424: if (shift==0.0) { // Jacobian
425: PetscLogEventBegin(ctx->events[4],0,0,0,0);
426: } else { // mass
427: PetscLogEventBegin(ctx->events[16],0,0,0,0);
428: }
429: for (PetscInt qj = 0; qj < Nq; ++qj) {
430: const PetscInt jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq;
431: PetscReal g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1
432: PetscInt d,d2,dp,d3,IPf_idx;
433: if (shift==0.0) { // Jacobian
434: const PetscReal * const invJj = &invJe[qj*dim*dim];
435: PetscReal gg2[LANDAU_MAX_SPECIES][LANDAU_DIM],gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
436: const PetscReal vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb];
437: // create g2 & g3
438: for (d=0;d<LANDAU_DIM;d++) { // clear accumulation data D & K
439: gg2_temp[d] = 0;
440: for (d2=0;d2<LANDAU_DIM;d2++) gg3_temp[d][d2] = 0;
441: }
442: /* inner beta reduction */
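/*
  Editorial note: this reduction visits every quadrature point of every cell on every grid (all
  field species, beta) and accumulates the Landau tensor (U, or Uk/Ud in 2D) contracted with
  w_i*nu_beta*invMass*grad(f) into gg2_temp and with w_i*nu_beta*f into gg3_temp. The result is
  independent of the test species, so it is computed once per (element, quadrature point) and
  only scaled by nu_alpha per field afterwards.
*/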
443: IPf_idx = 0;
444: for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids ; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r
445: PetscInt nip_loc_r = numCells[grid_r]*Nq, Nfloc_r = Nf[grid_r];
446: for (PetscInt ei_r = 0, loc_fdf_idx = 0; ei_r < numCells[grid_r]; ++ei_r) {
447: for (PetscInt qi = 0; qi < Nq; qi++, ipidx++, loc_fdf_idx++) {
448: const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
449: PetscReal temp1[3] = {0, 0, 0}, temp2 = 0;
450: #if LANDAU_DIM==2
451: PetscReal Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
452: LandauTensor2D(vj, x, y, Ud, Uk, mask);
453: #else
454: PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2]-z) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
455: if (ctx->use_relativistic_corrections) {
456: LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0));
457: } else {
458: LandauTensor3D(vj, x, y, z, U, mask);
459: }
460: #endif
461: for (int f = 0; f < Nfloc_r ; ++f) {
462: const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid_r] + f*nip_loc_r + ei_r*Nq + qi; // IPf_idx + f*nip_loc_r + loc_fdf_idx;
463: temp1[0] += dudx[idx]*nu_beta[f+f_off]*invMass[f+f_off];
464: temp1[1] += dudy[idx]*nu_beta[f+f_off]*invMass[f+f_off];
465: #if LANDAU_DIM==3
466: temp1[2] += dudz[idx]*nu_beta[f+f_off]*invMass[f+f_off];
467: #endif
468: temp2 += ff[idx]*nu_beta[f+f_off];
469: }
470: temp1[0] *= wi;
471: temp1[1] *= wi;
472: #if LANDAU_DIM==3
473: temp1[2] *= wi;
474: #endif
475: temp2 *= wi;
476: #if LANDAU_DIM==2
477: for (d2 = 0; d2 < 2; d2++) {
478: for (d3 = 0; d3 < 2; ++d3) {
479: /* K = U * grad(f): g2=e: i,A */
480: gg2_temp[d2] += Uk[d2][d3]*temp1[d3];
481: /* D = -U * (I \kron (fx)): g3=f: i,j,A */
482: gg3_temp[d2][d3] += Ud[d2][d3]*temp2;
483: }
484: }
485: #else
486: for (d2 = 0; d2 < 3; ++d2) {
487: for (d3 = 0; d3 < 3; ++d3) {
488: /* K = U * grad(f): g2 = e: i,A */
489: gg2_temp[d2] += U[d2][d3]*temp1[d3];
490: /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
491: gg3_temp[d2][d3] += U[d2][d3]*temp2;
492: }
493: }
494: #endif
495: } // qi
496: } // ei_r
497: IPf_idx += nip_loc_r*Nfloc_r;
498: } /* grid_r - IPs */
500: // add alpha and put in gg2/3
501: for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) {
502: for (d2 = 0; d2 < dim; d2++) {
503: gg2[fieldA][d2] = gg2_temp[d2]*nu_alpha[fieldA+f_off];
504: for (d3 = 0; d3 < dim; d3++) {
505: gg3[fieldA][d2][d3] = -gg3_temp[d2][d3]*nu_alpha[fieldA+f_off]*invMass[fieldA+f_off];
506: }
507: }
508: }
509: /* add electric field term once per IP */
510: for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid] ; fieldA < loc_Nf; ++fieldA) {
511: gg2[fieldA][dim-1] += Eq_m[fieldA+f_off];
512: }
513: /* Jacobian transform - g2, g3 */
514: for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
515: for (d = 0; d < dim; ++d) {
516: g2[fieldA][d] = 0.0;
517: for (d2 = 0; d2 < dim; ++d2) {
518: g2[fieldA][d] += invJj[d*dim+d2]*gg2[fieldA][d2];
519: g3[fieldA][d][d2] = 0.0;
520: for (d3 = 0; d3 < dim; ++d3) {
521: for (dp = 0; dp < dim; ++dp) {
522: g3[fieldA][d][d2] += invJj[d*dim + d3]*gg3[fieldA][d3][dp]*invJj[d2*dim + dp];
523: }
524: }
525: g3[fieldA][d][d2] *= wj;
526: }
527: g2[fieldA][d] *= wj;
528: }
529: }
530: } else { // mass
531: PetscReal wj = ww[jpidx_glb];
532: /* Jacobian transform - g0 */
533: for (PetscInt fieldA = 0; fieldA < loc_Nf ; ++fieldA) {
534: if (dim==2) {
535: g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0
536: } else {
537: g0[fieldA] = wj * shift; // move this to below and remove g0
538: }
539: }
540: }
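/*
  Editorial note: element matrix contributions (per species block, on the diagonal of the
  element matrix) assembled below are
    Jacobian: elemMat[i,j] += sum_d D_i[d]*g2[d]*B_j + sum_{d,d2} D_i[d]*g3[d][d2]*D_j[d2]
    Mass:     elemMat[i,j] += B_i*g0*B_j
*/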
541: /* FE matrix construction */
542: {
543: PetscInt fieldA,d,f,d2,g;
544: const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim];
545: /* assemble - on the diagonal (I,I) */
546: for (fieldA = 0; fieldA < loc_Nf ; fieldA++) {
547: for (f = 0; f < Nb ; f++) {
548: const PetscInt i = fieldA*Nb + f; /* Element matrix row */
549: for (g = 0; g < Nb; ++g) {
550: const PetscInt j = fieldA*Nb + g; /* Element matrix column */
551: const PetscInt fOff = i*totDim + j;
552: if (shift==0.0) {
553: for (d = 0; d < dim; ++d) {
554: elemMat[fOff] += DIq[f*dim+d]*g2[fieldA][d]*BJq[g];
555: for (d2 = 0; d2 < dim; ++d2) {
556: elemMat[fOff] += DIq[f*dim + d]*g3[fieldA][d][d2]*DIq[g*dim + d2];
557: }
558: }
559: } else { // mass
560: elemMat[fOff] += BJq[f]*g0[fieldA]*BJq[g];
561: }
562: }
563: }
564: }
565: }
566: } /* qj loop */
567: if (shift==0.0) { // Jacobian
568: PetscLogEventEnd(ctx->events[4],0,0,0,0);
569: } else {
570: PetscLogEventEnd(ctx->events[16],0,0,0,0);
571: }
572: #if defined(PETSC_HAVE_THREADSAFETY)
573: endtime = MPI_Wtime();
574: if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime);
575: #endif
576: /* assemble matrix */
577: if (!container) {
578: PetscInt cStart;
579: PetscLogEventBegin(ctx->events[6],0,0,0,0);
580: DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL);
581: DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[ LAND_PACK_IDX(b_id,grid) ], loc_elem + cStart, elemMat, ADD_VALUES);
582: PetscLogEventEnd(ctx->events[6],0,0,0,0);
583: } else { // GPU like assembly for debugging
584: PetscInt fieldA,q,f,g,d,nr,nc,rows0[LANDAU_MAX_Q_FACE]={0},cols0[LANDAU_MAX_Q_FACE]={0},rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE];
585: PetscScalar vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE]={0},row_scale[LANDAU_MAX_Q_FACE]={0},col_scale[LANDAU_MAX_Q_FACE]={0};
586: LandauIdx *coo_elem_offsets = (LandauIdx*)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx*)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = (LandauIdx (*)[LANDAU_MAX_NQ+1])ctx->SData_d.coo_elem_point_offsets;
587: /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
588: for (fieldA = 0; fieldA < loc_Nf ; fieldA++) {
589: LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0];
590: for (f = 0; f < Nb ; f++) {
591: PetscInt idx = Idxs[f];
592: if (idx >= 0) {
593: nr = 1;
594: rows0[0] = idx;
595: row_scale[0] = 1.;
596: } else {
597: idx = -idx - 1;
598: for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) {
599: if (maps[grid].c_maps[idx][q].gid < 0) break;
600: rows0[q] = maps[grid].c_maps[idx][q].gid;
601: row_scale[q] = maps[grid].c_maps[idx][q].scale;
602: }
603: }
604: for (g = 0; g < Nb; ++g) {
605: idx = Idxs[g];
606: if (idx >= 0) {
607: nc = 1;
608: cols0[0] = idx;
609: col_scale[0] = 1.;
610: } else {
611: idx = -idx - 1;
612: nc = maps[grid].num_face;
613: for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) {
614: if (maps[grid].c_maps[idx][q].gid < 0) break;
615: cols0[q] = maps[grid].c_maps[idx][q].gid;
616: col_scale[q] = maps[grid].c_maps[idx][q].scale;
617: }
618: }
619: const PetscInt i = fieldA*Nb + f; /* Element matrix row */
620: const PetscInt j = fieldA*Nb + g; /* Element matrix column */
621: const PetscScalar Aij = elemMat[i*totDim + j];
622: if (coo_vals) { // mirror (i,j) in CreateStaticGPUData
623: const int fullNb = coo_elem_fullNb[glb_elem_idx],fullNb2=fullNb*fullNb;
624: const int idx0 = b_id*coo_elem_offsets[elem_offset[num_grids]] + coo_elem_offsets[glb_elem_idx] + fieldA*fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
625: for (int q = 0, idx2 = idx0; q < nr; q++) {
626: for (int d = 0; d < nc; d++, idx2++) {
627: coo_vals[idx2] = row_scale[q]*col_scale[d]*Aij;
628: }
629: }
630: } else {
631: for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset;
632: for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset;
633: for (q = 0; q < nr; q++) {
634: for (d = 0; d < nc; d++) {
635: vals[q*nc + d] = row_scale[q]*col_scale[d]*Aij;
636: }
637: }
638: MatSetValues(JacP,nr,rows,nc,cols,vals,ADD_VALUES);
639: }
640: }
641: }
642: }
643: }
644: if (loc_elem==-1) {
645: PetscPrintf(ctx->comm,"CPU Element matrix\n");
646: for (int d = 0; d < totDim; ++d) {
647: for (int f = 0; f < totDim; ++f) PetscPrintf(ctx->comm," %12.5e", PetscRealPart(elemMat[d*totDim + f]));
648: PetscPrintf(ctx->comm,"\n");
649: }
650: exit(12);
651: }
652: PetscFree(elemMat);
653: } /* grid */
654: } /* outer element & batch loop */
655: if (shift==0.0) { // Jacobian: free the dynamic f data
656: PetscFree4(ff, dudx, dudy, dudz);
657: }
658: if (!container) { // 'CPU' assembly: move the per-grid sub matrices into the global JacP
659: for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP
660: for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
661: const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid];
662: PetscInt nloc, nzl, colbuf[1024], row;
663: const PetscInt *cols;
664: const PetscScalar *vals;
665: Mat B = subJ[ LAND_PACK_IDX(b_id,grid) ];
666: MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY);
667: MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY);
668: MatGetSize(B, &nloc, NULL);
669: for (int i=0 ; i<nloc ; i++) {
670: MatGetRow(B,i,&nzl,&cols,&vals);
672: for (int j=0; j<nzl; j++) colbuf[j] = moffset + cols[j];
673: row = moffset + i;
674: MatSetValues(JacP,1,&row,nzl,colbuf,vals,ADD_VALUES);
675: MatRestoreRow(B,i,&nzl,&cols,&vals);
676: }
677: MatDestroy(&B);
678: }
679: }
680: }
681: if (coo_vals) {
682: MatSetValuesCOO(JacP,coo_vals,ADD_VALUES);
683: PetscFree(coo_vals);
684: }
685: } /* CPU version */
686: MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY);
687: MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY);
688: /* clean up */
689: if (cellClosure) {
690: PetscFree(cellClosure);
691: }
692: if (xdata) {
693: VecRestoreArrayReadAndMemType(a_X,&xdata);
694: }
695: return 0;
696: }
698: #if defined(LANDAU_ADD_BCS)
699: static void zero_bc(PetscInt dim, PetscInt Nf, PetscInt NfAux,
700: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
701: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
702: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar uexact[])
703: {
704: uexact[0] = 0;
705: }
706: #endif
708: #define MATVEC2(__a,__x,__p) {int i,j; for (i=0; i<2; i++) {__p[i] = 0; for (j=0; j<2; j++) __p[i] += __a[i][j]*__x[j]; }}
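/* Editorial note: MATVEC2 above is a 2x2 matrix-vector product used by CircleInflate, which maps
   points of the boxed velocity grid onto graded circular sections (r1 = inner/ion radius,
   r2 = electron radius, r0 = outer domain radius); it is applied through GeometryDMLandau when
   ctx->sphere and ctx->inflate are set. */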
709: static void CircleInflate(PetscReal r1, PetscReal r2, PetscReal r0, PetscInt num_sections, PetscReal x, PetscReal y,
710: PetscReal *outX, PetscReal *outY)
711: {
712: PetscReal rr = PetscSqrtReal(x*x + y*y), outfact, efact;
713: if (rr < r1 + PETSC_SQRT_MACHINE_EPSILON) {
714: *outX = x; *outY = y;
715: } else {
716: const PetscReal xy[2] = {x,y}, sinphi=y/rr, cosphi=x/rr;
717: PetscReal cth,sth,xyprime[2],Rth[2][2],rotcos,newrr;
718: if (num_sections==2) {
719: rotcos = 0.70710678118654;
720: outfact = 1.5; efact = 2.5;
721: /* rotate normalized vector into [-pi/4,pi/4) */
722: if (sinphi >= 0.) { /* top cell, -pi/2 */
723: cth = 0.707106781186548; sth = -0.707106781186548;
724: } else { /* bottom cell -pi/8 */
725: cth = 0.707106781186548; sth = .707106781186548;
726: }
727: } else if (num_sections==3) {
728: rotcos = 0.86602540378443;
729: outfact = 1.5; efact = 2.5;
730: /* rotate normalized vector into [-pi/6,pi/6) */
731: if (sinphi >= 0.5) { /* top cell, -pi/3 */
732: cth = 0.5; sth = -0.866025403784439;
733: } else if (sinphi >= -.5) { /* mid cell 0 */
734: cth = 1.; sth = .0;
735: } else { /* bottom cell +pi/3 */
736: cth = 0.5; sth = 0.866025403784439;
737: }
738: } else if (num_sections==4) {
739: rotcos = 0.9238795325112;
740: outfact = 1.5; efact = 3;
741: /* rotate normalized vector into [-pi/8,pi/8) */
742: if (sinphi >= 0.707106781186548) { /* top cell, -3pi/8 */
743: cth = 0.38268343236509; sth = -0.923879532511287;
744: } else if (sinphi >= 0.) { /* mid top cell -pi/8 */
745: cth = 0.923879532511287; sth = -.38268343236509;
746: } else if (sinphi >= -0.707106781186548) { /* mid bottom cell + pi/8 */
747: cth = 0.923879532511287; sth = 0.38268343236509;
748: } else { /* bottom cell + 3pi/8 */
749: cth = 0.38268343236509; sth = .923879532511287;
750: }
751: } else {
752: cth = 0.; sth = 0.; rotcos = 0; efact = 0;
753: }
754: Rth[0][0] = cth; Rth[0][1] =-sth;
755: Rth[1][0] = sth; Rth[1][1] = cth;
756: MATVEC2(Rth,xy,xyprime);
757: if (num_sections==2) {
758: newrr = xyprime[0]/rotcos;
759: } else {
760: PetscReal newcosphi=xyprime[0]/rr, rin = r1, rout = rr - rin;
761: PetscReal routmax = r0*rotcos/newcosphi - rin, nroutmax = r0 - rin, routfrac = rout/routmax;
762: newrr = rin + routfrac*nroutmax;
763: }
764: *outX = cosphi*newrr; *outY = sinphi*newrr;
765: /* grade */
766: PetscReal fact,tt,rs,re, rr = PetscSqrtReal(PetscSqr(*outX) + PetscSqr(*outY));
767: if (rr > r2) { rs = r2; re = r0; fact = outfact;} /* outer zone */
768: else { rs = r1; re = r2; fact = efact;} /* electron zone */
769: tt = (rs + PetscPowReal((rr - rs)/(re - rs),fact) * (re-rs)) / rr;
770: *outX *= tt;
771: *outY *= tt;
772: }
773: }
775: static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx)
776: {
777: LandauCtx *ctx = (LandauCtx*)a_ctx;
778: PetscReal r = abc[0], z = abc[1];
779: if (ctx->inflate) {
780: PetscReal absR, absZ;
781: absR = PetscAbs(r);
782: absZ = PetscAbs(z);
783: CircleInflate(ctx->i_radius[0],ctx->e_radius,ctx->radius[0],ctx->num_sections,absR,absZ,&absR,&absZ); // wrong: how do I know what grid I am on?
784: r = (r > 0) ? absR : -absR;
785: z = (z > 0) ? absZ : -absZ;
786: }
787: xyz[0] = r;
788: xyz[1] = z;
789: if (dim==3) xyz[2] = abc[2];
791: return 0;
792: }
794: /* create DMComposite of meshes for each species group */
795: static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack)
796: {
797: { /* p4est, quads */
798: /* Create plex mesh of Landau domain */
799: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
800: PetscReal radius = ctx->radius[grid];
801: if (!ctx->sphere) {
802: PetscInt cells[] = {2,2,2};
803: PetscReal lo[] = {-radius,-radius,-radius}, hi[] = {radius,radius,radius};
804: DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, dim==2 ? DM_BOUNDARY_NONE : DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
805: if (dim==2) { lo[0] = 0; cells[0] /* = cells[1] */ = 1; }
806: DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, cells, lo, hi, periodicity, PETSC_TRUE, &ctx->plex[grid]); // todo: make composite and create dm[grid] here
807: DMLocalizeCoordinates(ctx->plex[grid]); /* needed for periodic */
808: if (dim==3) PetscObjectSetName((PetscObject) ctx->plex[grid], "cube");
809: else PetscObjectSetName((PetscObject) ctx->plex[grid], "half-plane");
810: } else if (dim==2) { // sphere is all wrong. should just have one inner radius
811: PetscInt numCells,cells[16][4],i,j;
812: PetscInt numVerts;
813: PetscReal inner_radius1 = ctx->i_radius[grid], inner_radius2 = ctx->e_radius;
814: PetscReal *flatCoords = NULL;
815: PetscInt *flatCells = NULL, *pcell;
816: if (ctx->num_sections==2) {
817: #if 1
818: numCells = 5;
819: numVerts = 10;
820: int cells2[][4] = { {0,1,4,3},
821: {1,2,5,4},
822: {3,4,7,6},
823: {4,5,8,7},
824: {6,7,8,9} };
825: for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
826: PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
827: {
828: PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
829: for (j = 0; j < numVerts-1; j++) {
830: PetscReal z, r, theta = -PETSC_PI/2 + (j%3) * PETSC_PI/2;
831: PetscReal rad = (j >= 6) ? inner_radius1 : (j >= 3) ? inner_radius2 : ctx->radius[grid];
832: z = rad * PetscSinReal(theta);
833: coords[j][1] = z;
834: r = rad * PetscCosReal(theta);
835: coords[j][0] = r;
836: }
837: coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
838: }
839: #else
840: numCells = 4;
841: numVerts = 8;
842: static int cells2[][4] = {{0,1,2,3},
843: {4,5,1,0},
844: {5,6,2,1},
845: {6,7,3,2}};
846: for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
847: PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
848: {
849: PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
850: PetscInt j;
851: for (j = 0; j < 8; j++) {
852: PetscReal z, r;
853: PetscReal theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3.;
854: PetscReal rad = ctx->radius[grid] * ((j < 4) ? 0.5 : 1.0);
855: z = rad * PetscSinReal(theta);
856: coords[j][1] = z;
857: r = rad * PetscCosReal(theta);
858: coords[j][0] = r;
859: }
860: }
861: #endif
862: } else if (ctx->num_sections==3) {
863: numCells = 7;
864: numVerts = 12;
865: int cells2[][4] = { {0,1,5,4},
866: {1,2,6,5},
867: {2,3,7,6},
868: {4,5,9,8},
869: {5,6,10,9},
870: {6,7,11,10},
871: {8,9,10,11} };
872: for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
873: PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
874: {
875: PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
876: for (j = 0; j < numVerts; j++) {
877: PetscReal z, r, theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3;
878: PetscReal rad = (j >= 8) ? inner_radius1 : (j >= 4) ? inner_radius2 : ctx->radius[grid];
879: z = rad * PetscSinReal(theta);
880: coords[j][1] = z;
881: r = rad * PetscCosReal(theta);
882: coords[j][0] = r;
883: }
884: }
885: } else if (ctx->num_sections==4) {
886: numCells = 10;
887: numVerts = 16;
888: int cells2[][4] = { {0,1,6,5},
889: {1,2,7,6},
890: {2,3,8,7},
891: {3,4,9,8},
892: {5,6,11,10},
893: {6,7,12,11},
894: {7,8,13,12},
895: {8,9,14,13},
896: {10,11,12,15},
897: {12,13,14,15}};
898: for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
899: PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
900: {
901: PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
902: for (j = 0; j < numVerts-1; j++) {
903: PetscReal z, r, theta = -PETSC_PI/2 + (j%5) * PETSC_PI/4;
904: PetscReal rad = (j >= 10) ? inner_radius1 : (j >= 5) ? inner_radius2 : ctx->radius[grid];
905: z = rad * PetscSinReal(theta);
906: coords[j][1] = z;
907: r = rad * PetscCosReal(theta);
908: coords[j][0] = r;
909: }
910: coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
911: }
912: } else {
913: numCells = 0;
914: numVerts = 0;
915: }
916: for (j = 0, pcell = flatCells; j < numCells; j++, pcell += 4) {
917: pcell[0] = cells[j][0]; pcell[1] = cells[j][1];
918: pcell[2] = cells[j][2]; pcell[3] = cells[j][3];
919: }
920: DMPlexCreateFromCellListPetsc(comm_self,2,numCells,numVerts,4,ctx->interpolate,flatCells,2,flatCoords,&ctx->plex[grid]);
921: PetscFree2(flatCoords,flatCells);
922: PetscObjectSetName((PetscObject) ctx->plex[grid], "semi-circle");
923: } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space meshes do not support the cubed sphere");
925: DMSetFromOptions(ctx->plex[grid]);
926: } // grid loop
927: PetscObjectSetOptionsPrefix((PetscObject)pack,prefix);
928: DMSetFromOptions(pack);
930: { /* convert to p4est (or whatever), wait for discretization to create pack */
931: char convType[256];
932: PetscBool flg;
935: PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");
936: PetscOptionsFList("-dm_landau_type","Convert DMPlex to another format (p4est)","plexland.c",DMList,DMPLEX,convType,256,&flg);
937: PetscOptionsEnd();
938: if (flg) {
939: ctx->use_p4est = PETSC_TRUE; /* flag for Forest */
940: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
941: DM dmforest;
942: DMConvert(ctx->plex[grid],convType,&dmforest);
943: if (dmforest) {
944: PetscBool isForest;
945: PetscObjectSetOptionsPrefix((PetscObject)dmforest,prefix);
946: DMIsForest(dmforest,&isForest);
947: if (isForest) {
948: if (ctx->sphere && ctx->inflate) {
949: DMForestSetBaseCoordinateMapping(dmforest,GeometryDMLandau,ctx);
950: }
951: DMDestroy(&ctx->plex[grid]);
952: ctx->plex[grid] = dmforest; // Forest for adaptivity
953: } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Converted to non Forest?");
954: } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Convert failed?");
955: }
956: } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */
957: }
958: } /* non-file */
959: DMSetDimension(pack, dim);
960: PetscObjectSetName((PetscObject) pack, "Mesh");
961: DMSetApplicationContext(pack, ctx);
963: return 0;
964: }
966: static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, LandauCtx *ctx)
967: {
968: PetscInt ii,i0;
969: char buf[256];
970: PetscSection section;
972: for (ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
973: if (ii==0) PetscSNPrintf(buf, sizeof(buf), "e");
974: else PetscSNPrintf(buf, sizeof(buf), "i%" PetscInt_FMT, ii);
975: /* Setup Discretization - FEM */
976: PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &ctx->fe[ii]);
977: PetscObjectSetName((PetscObject) ctx->fe[ii], buf);
978: DMSetField(ctx->plex[grid], i0, NULL, (PetscObject) ctx->fe[ii]);
979: }
980: DMCreateDS(ctx->plex[grid]);
981: DMGetSection(ctx->plex[grid], &section);
982: for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
983: if (ii==0) PetscSNPrintf(buf, sizeof(buf), "se");
984: else PetscSNPrintf(buf, sizeof(buf), "si%" PetscInt_FMT, ii);
985: PetscSectionSetComponentName(section, i0, 0, buf);
986: }
987: return 0;
988: }
990: /* Define a Maxwellian function for testing out the operator. */
992: /* Using cartesian velocity space coordinates, the particle */
993: /* density, [1/m^3], is defined according to */
995: /* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */
997: /* Using some constant, c, we normalize the velocity vector into a */
998: /* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */
1000: /* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */
1002: /* Defining $\theta=2T/mc^2$, we thus find that the probability density */
1003: /* for finding the particle within the interval in a box dx^3 around x is */
1005: /* f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] */
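/* Editorial note: the maxwellian() routine below evaluates n * f(x;theta) with theta = 2*kT/(m*v_0^2),
   adding a second Maxwellian shifted by mctx->shift in the last coordinate when that shift is nonzero. */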
1007: typedef struct {
1008: PetscReal v_0;
1009: PetscReal kT_m;
1010: PetscReal n;
1011: PetscReal shift;
1012: } MaxwellianCtx;
1014: static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
1015: {
1016: MaxwellianCtx *mctx = (MaxwellianCtx*)actx;
1017: PetscInt i;
1018: PetscReal v2 = 0, theta = 2*mctx->kT_m/(mctx->v_0*mctx->v_0); /* theta = 2kT/mc^2 */
1019: /* compute the exponents, v^2 */
1020: for (i = 0; i < dim; ++i) v2 += x[i]*x[i];
1021: /* evaluate the Maxwellian */
1022: u[0] = mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
1023: if (mctx->shift!=0.) {
1024: v2 = 0;
1025: for (i = 0; i < dim-1; ++i) v2 += x[i]*x[i];
1026: v2 += (x[dim-1]-mctx->shift)*(x[dim-1]-mctx->shift);
1027: /* evaluate the shifted Maxwellian */
1028: u[0] += mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
1029: }
1030: return 0;
1031: }
1033: /*@
1034: DMPlexLandauAddMaxwellians - Add a Maxwellian distribution to a state
1036: Collective on X
1038: Input Parameters:
1039: +   dm - The mesh (local)
1040: .   time - Current time
1041: .   temps - Temperatures of each species (global)
1042: .   ns - Number density of each species (global)
1043: .   grid - index into current grid - just used for offset into temps and ns
1044: .   b_id - batch index
1045: -   actx - Landau context
1046: Output Parameter:
1047: . X - The state (local to this grid)
1049: Level: beginner
1051: .keywords: mesh
1052: .seealso: DMPlexLandauCreateVelocitySpace()
1053: @*/
1054: PetscErrorCode DMPlexLandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, void *actx)
1055: {
1056: LandauCtx *ctx = (LandauCtx*)actx;
1057: PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *);
1058: PetscInt dim;
1059: MaxwellianCtx *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];
1061: DMGetDimension(dm, &dim);
1062: if (!ctx) DMGetApplicationContext(dm, &ctx);
1063: for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
1064: mctxs[i0] = &data[i0];
1065: data[i0].v_0 = ctx->v_0; // v_0 same for all grids
1066: data[i0].kT_m = ctx->k*temps[ii]/ctx->masses[ii]; /* kT/m */
1067: data[i0].n = ns[ii] * (1+(double)b_id/100.0); // make solves a little different to mimic application, n[0] used for Conner-Hastie
1068: initu[i0] = maxwellian;
1069: data[i0].shift = 0;
1070: }
1071: data[0].shift = ctx->electronShift;
1072: /* need to make ADD_ALL_VALUES work - TODO */
1073: DMProjectFunction(dm, time, initu, (void**)mctxs, INSERT_ALL_VALUES, X);
1074: return 0;
1075: }
1077: /*
1078: LandauSetInitialCondition - Adds Maxwellians with context
1080: Collective on X
1082: Input Parameters:
1083: +   dm - The mesh
1084: .   grid - index into current grid - just used for offset into temps and ns
1085: .   b_id - batch index
1086: -   actx - Landau context with T and n
1087: Output Parameter:
1088: . X - The state
1090: Level: beginner
1092: .keywords: mesh
1093: .seealso: DMPlexLandauCreateVelocitySpace(), DMPlexLandauAddMaxwellians()
1094: */
1095: static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, void *actx)
1096: {
1097: LandauCtx *ctx = (LandauCtx*)actx;
1098: if (!ctx) DMGetApplicationContext(dm, &ctx);
1099: VecZeroEntries(X);
1100: DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, ctx);
1101: return 0;
1102: }
1104: // adapt a level once. Forest in/out
1105: static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest)
1106: {
1107: DM forest, plex, adaptedDM = NULL;
1108: PetscDS prob;
1109: PetscBool isForest;
1110: PetscQuadrature quad;
1111: PetscInt Nq, *Nb, cStart, cEnd, c, dim, qj, k;
1112: DMLabel adaptLabel = NULL;
1114: forest = ctx->plex[grid];
1115: DMCreateDS(forest);
1116: DMGetDS(forest, &prob);
1117: DMGetDimension(forest, &dim);
1118: DMIsForest(forest, &isForest);
1120: DMConvert(forest, DMPLEX, &plex);
1121: DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);
1122: DMLabelCreate(PETSC_COMM_SELF,"adapt",&adaptLabel);
1123: PetscFEGetQuadrature(fem, &quad);
1124: PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL);
1126: PetscDSGetDimensions(prob, &Nb);
1127: if (type==4) {
1128: for (c = cStart; c < cEnd; c++) {
1129: DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE);
1130: }
1131: PetscInfo(sol, "Phase:%s: Uniform refinement\n","adaptToleranceFEM");
1132: } else if (type==2) {
1133: PetscInt rCellIdx[8], eCellIdx[64], iCellIdx[64], eMaxIdx = -1, iMaxIdx = -1, nr = 0, nrmax = (dim==3) ? 8 : 2;
1134: PetscReal minRad = PETSC_INFINITY, r, eMinRad = PETSC_INFINITY, iMinRad = PETSC_INFINITY;
1135: for (c = 0; c < 64; c++) { eCellIdx[c] = iCellIdx[c] = -1; }
1136: for (c = cStart; c < cEnd; c++) {
1137: PetscReal tt, v0[LANDAU_MAX_NQ*3], detJ[LANDAU_MAX_NQ];
1138: DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ);
1139: for (qj = 0; qj < Nq; ++qj) {
1140: tt = PetscSqr(v0[dim*qj+0]) + PetscSqr(v0[dim*qj+1]) + PetscSqr(((dim==3) ? v0[dim*qj+2] : 0));
1141: r = PetscSqrtReal(tt);
1142: if (r < minRad - PETSC_SQRT_MACHINE_EPSILON*10.) {
1143: minRad = r;
1144: nr = 0;
1145: rCellIdx[nr++]= c;
1146: PetscInfo(sol, "\t\tPhase: adaptToleranceFEM Found first inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT "\n", r, c, qj+1, Nq);
1147: } else if ((r-minRad) < PETSC_SQRT_MACHINE_EPSILON*100. && nr < nrmax) {
1148: for (k=0;k<nr;k++) if (c == rCellIdx[k]) break;
1149: if (k==nr) {
1150: rCellIdx[nr++]= c;
1151: PetscInfo(sol, "\t\t\tPhase: adaptToleranceFEM Found another inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT ", d=%e\n", r, c, qj+1, Nq, r-minRad);
1152: }
1153: }
1154: if (ctx->sphere) {
1155: if ((tt=r-ctx->e_radius) > 0) {
1156: PetscInfo(sol, "\t\t\t %" PetscInt_FMT " cell r=%g\n",c,tt);
1157: if (tt < eMinRad - PETSC_SQRT_MACHINE_EPSILON*100.) {
1158: eMinRad = tt;
1159: eMaxIdx = 0;
1160: eCellIdx[eMaxIdx++] = c;
1161: } else if (eMaxIdx > 0 && (tt-eMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != eCellIdx[eMaxIdx-1]) {
1162: eCellIdx[eMaxIdx++] = c;
1163: }
1164: }
1165: if ((tt=r-ctx->i_radius[grid]) > 0) {
1166: if (tt < iMinRad - 1.e-5) {
1167: iMinRad = tt;
1168: iMaxIdx = 0;
1169: iCellIdx[iMaxIdx++] = c;
1170: } else if (iMaxIdx > 0 && (tt-iMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != iCellIdx[iMaxIdx-1]) {
1171: iCellIdx[iMaxIdx++] = c;
1172: }
1173: }
1174: }
1175: }
1176: }
1177: for (k=0;k<nr;k++) {
1178: DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE);
1179: }
1180: if (ctx->sphere) {
1181: for (c = 0; c < eMaxIdx; c++) {
1182: DMLabelSetValue(adaptLabel, eCellIdx[c], DM_ADAPT_REFINE);
1183: PetscInfo(sol, "\t\tPhase:%s: refine sphere e cell %" PetscInt_FMT " r=%g\n","adaptToleranceFEM",eCellIdx[c],eMinRad);
1184: }
1185: for (c = 0; c < iMaxIdx; c++) {
1186: DMLabelSetValue(adaptLabel, iCellIdx[c], DM_ADAPT_REFINE);
1187: PetscInfo(sol, "\t\tPhase:%s: refine sphere i cell %" PetscInt_FMT " r=%g\n","adaptToleranceFEM",iCellIdx[c],iMinRad);
1188: }
1189: }
1190: PetscInfo(sol, "Phase:%s: Adaptive refine origin cells %" PetscInt_FMT ",%" PetscInt_FMT " r=%g\n","adaptToleranceFEM",rCellIdx[0],rCellIdx[1],minRad);
1191: } else if (type==0 || type==1 || type==3) { /* refine along r=0 axis */
1192: PetscScalar *coef = NULL;
1193: Vec coords;
1194: PetscInt csize,Nv,d,nz;
1195: DM cdm;
1196: PetscSection cs;
1197: DMGetCoordinatesLocal(forest, &coords);
1198: DMGetCoordinateDM(forest, &cdm);
1199: DMGetLocalSection(cdm, &cs);
1200: for (c = cStart; c < cEnd; c++) {
1201: PetscInt doit = 0, outside = 0;
1202: DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef);
1203: Nv = csize/dim;
1204: for (nz = d = 0; d < Nv; d++) {
1205: PetscReal z = PetscRealPart(coef[d*dim + (dim-1)]), x = PetscSqr(PetscRealPart(coef[d*dim + 0])) + ((dim==3) ? PetscSqr(PetscRealPart(coef[d*dim + 1])) : 0);
1206: x = PetscSqrtReal(x);
1207: if (x < PETSC_MACHINE_EPSILON*10. && PetscAbs(z)<PETSC_MACHINE_EPSILON*10.) doit = 1; /* refine origin */
1208: else if (type==0 && (z < -PETSC_MACHINE_EPSILON*10. || z > ctx->re_radius+PETSC_MACHINE_EPSILON*10.)) outside++; /* first pass don't refine bottom */
1209: else if (type==1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) outside++; /* don't refine outside electron refine radius */
1210: else if (type==3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) outside++; /* don't refine outside ion refine radius */
1211: if (x < PETSC_MACHINE_EPSILON*10.) nz++;
1212: }
1213: DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef);
1214: if (doit || (outside<Nv && nz)) {
1215: DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE);
1216: }
1217: }
1218: PetscInfo(sol, "Phase:%s: RE refinement\n","adaptToleranceFEM");
1219: }
1220: DMDestroy(&plex);
1221: DMAdaptLabel(forest, adaptLabel, &adaptedDM);
1222: DMLabelDestroy(&adaptLabel);
1223: *newForest = adaptedDM;
1224: if (adaptedDM) {
1225: if (isForest) {
1226: DMForestSetAdaptivityForest(adaptedDM,NULL); // ????
1227: } else exit(33); // ???????
1228: DMConvert(adaptedDM, DMPLEX, &plex);
1229: DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);
1230: PetscInfo(sol, "\tPhase: adaptToleranceFEM: %" PetscInt_FMT " cells, %" PetscInt_FMT " total quadrature points\n",cEnd-cStart,Nq*(cEnd-cStart));
1231: DMDestroy(&plex);
1232: } else *newForest = NULL;
1233: return 0;
1234: }
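/*
  Editorial note on the refinement types used by adapt() below: 0 = runaway-electron region along
  the r=0 axis (up to re_radius), 1 = r=0 axis inside vperp0_radius1, 2 = cells nearest the origin
  (plus the sphere radii when ctx->sphere), 3 = r=0 axis inside vperp0_radius2, 4 = uniform refinement.
*/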
1236: // forest goes in (ctx->plex[grid]), plex comes out
1237: static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu)
1238: {
1239: PetscInt adaptIter;
1241: PetscInt type, limits[5] = {(grid==0) ? ctx->numRERefine : 0, (grid==0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid==0) ? ctx->nZRefine2 : 0,ctx->postAMRRefine[grid]};
1242: for (type=0;type<5;type++) {
1243: for (adaptIter = 0; adaptIter<limits[type];adaptIter++) {
1244: DM newForest = NULL;
1245: adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest);
1246: if (newForest) {
1247: DMDestroy(&ctx->plex[grid]);
1248: VecDestroy(uu);
1249: DMCreateGlobalVector(newForest,uu);
1250: PetscObjectSetName((PetscObject) *uu, "uAMR");
1251: LandauSetInitialCondition(newForest, *uu, grid, 0, ctx);
1252: ctx->plex[grid] = newForest;
1253: } else {
1254: exit(4); // can happen with no AMR and post refinement
1255: }
1256: }
1257: }
1258: return 0;
1259: }
1261: static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[])
1262: {
1263: PetscErrorCode ierr;
1264: PetscBool flg, sph_flg;
1265: PetscInt ii,nt,nm,nc,num_species_grid[LANDAU_MAX_GRIDS];
1266: PetscReal v0_grid[LANDAU_MAX_GRIDS];
1267: DM dummy;
1269: DMCreate(ctx->comm,&dummy);
1270: /* get options - initialize context */
1271: ctx->verbose = 1; // should be 0 for silent compliance
1272: #if defined(PETSC_HAVE_THREADSAFETY)
1273: ctx->batch_sz = PetscNumOMPThreads;
1274: #else
1275: ctx->batch_sz = 1;
1276: #endif
1277: ctx->batch_view_idx = 0;
1278: ctx->interpolate = PETSC_TRUE;
1279: ctx->gpu_assembly = PETSC_TRUE;
1280: ctx->aux_bool = PETSC_FALSE;
1281: ctx->electronShift = 0;
1282: ctx->M = NULL;
1283: ctx->J = NULL;
1284: /* geometry and grids */
1285: ctx->sphere = PETSC_FALSE;
1286: ctx->inflate = PETSC_FALSE;
1287: ctx->aux_bool = PETSC_FALSE;
1288: ctx->use_p4est = PETSC_FALSE;
1289: ctx->num_sections = 3; /* 2, 3 or 4 */
1290: for (PetscInt grid=0;grid<LANDAU_MAX_GRIDS;grid++) {
1291: ctx->radius[grid] = 5.; /* thermal radius (velocity) */
1292: ctx->numAMRRefine[grid] = 5;
1293: ctx->postAMRRefine[grid] = 0;
1294: ctx->species_offset[grid+1] = 1; // one species default
1295: num_species_grid[grid] = 0;
1296: ctx->plex[grid] = NULL; /* cache as expensive to Convert */
1297: }
1298: ctx->species_offset[0] = 0;
1299: ctx->re_radius = 0.;
1300: ctx->vperp0_radius1 = 0;
1301: ctx->vperp0_radius2 = 0;
1302: ctx->nZRefine1 = 0;
1303: ctx->nZRefine2 = 0;
1304: ctx->numRERefine = 0;
1305: num_species_grid[0] = 1; // one species default
1306: /* species - [0] electrons, [1] one ion species, e.g. deuterium, [2] heavy impurity ion, ... */
1307: ctx->charges[0] = -1; /* electron charge (MKS) */
1308: ctx->masses[0] = 1/1835.469965278441013; /* temporary value, in units of proton mass */
1309: ctx->n[0] = 1;
1310: ctx->v_0 = 1; /* thermal velocity, we could start with a scale != 1 */
1311: ctx->thermal_temps[0] = 1;
1312: /* constants, etc. */
1313: ctx->epsilon0 = 8.8542e-12; /* permittivity of free space (MKS) F/m */
1314: ctx->k = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */
1315: ctx->lnLam = 10; /* cross section ratio large - small angle collisions */
1316: ctx->n_0 = 1.e20; /* typical plasma n, but could set it to 1 */
1317: ctx->Ez = 0;
1318: for (PetscInt grid=0;grid<LANDAU_NUM_TIMERS;grid++) ctx->times[grid] = 0;
1319: ctx->use_matrix_mass = PETSC_FALSE;
1320: ctx->use_relativistic_corrections = PETSC_FALSE;
1321: ctx->use_energy_tensor_trick = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */
1322: ctx->SData_d.w = NULL;
1323: ctx->SData_d.x = NULL;
1324: ctx->SData_d.y = NULL;
1325: ctx->SData_d.z = NULL;
1326: ctx->SData_d.invJ = NULL;
1327: ctx->jacobian_field_major_order = PETSC_FALSE;
1328: ctx->SData_d.coo_elem_offsets = NULL;
1329: ctx->SData_d.coo_elem_point_offsets = NULL;
1330: ctx->coo_assembly = PETSC_FALSE;
1331: ctx->SData_d.coo_elem_fullNb = NULL;
1332: ctx->SData_d.coo_size = 0;
1333: PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none");
1334: {
1335: char opstring[256];
1336: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1337: ctx->deviceType = LANDAU_KOKKOS;
1338: PetscStrcpy(opstring,"kokkos");
1339: #elif defined(PETSC_HAVE_CUDA)
1340: ctx->deviceType = LANDAU_CUDA;
1341: PetscStrcpy(opstring,"cuda");
1342: #else
1343: ctx->deviceType = LANDAU_CPU;
1344: PetscStrcpy(opstring,"cpu");
1345: #endif
1346: PetscOptionsString("-dm_landau_device_type","Use kernels on 'cpu', 'cuda', or 'kokkos'","plexland.c",opstring,opstring,sizeof(opstring),NULL);
1347: PetscStrcmp("cpu",opstring,&flg);
1348: if (flg) {
1349: ctx->deviceType = LANDAU_CPU;
1350: } else {
1351: PetscStrcmp("cuda",opstring,&flg);
1352: if (flg) {
1353: ctx->deviceType = LANDAU_CUDA;
1354: } else {
1355: PetscStrcmp("kokkos",opstring,&flg);
1356: if (flg) ctx->deviceType = LANDAU_KOKKOS;
1357: else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_device_type %s",opstring);
1358: }
1359: }
1360: }
1361: PetscOptionsReal("-dm_landau_electron_shift","Shift in thermal velocity of electrons","none",ctx->electronShift,&ctx->electronShift, NULL);
1362: PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL);
1363: PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL);
1365: PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL);
1367: PetscOptionsReal("-dm_landau_Ez","Initial parallel electric field in units of Conner-Hastie critical field","plexland.c",ctx->Ez,&ctx->Ez, NULL);
1368: PetscOptionsReal("-dm_landau_n_0","Normalization constant for number density","plexland.c",ctx->n_0,&ctx->n_0, NULL);
1369: PetscOptionsReal("-dm_landau_ln_lambda","Cross section parameter","plexland.c",ctx->lnLam,&ctx->lnLam, NULL);
1370: PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MATAXPY to add mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL);
1371: PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL);
1372: PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick, &ctx->use_energy_tensor_trick, NULL);
1374: /* get num species with temperature, set defaults */
1375: for (ii=1;ii<LANDAU_MAX_SPECIES;ii++) {
1376: ctx->thermal_temps[ii] = 1;
1377: ctx->charges[ii] = 1;
1378: ctx->masses[ii] = 1;
1379: ctx->n[ii] = 1;
1380: }
1381: nt = LANDAU_MAX_SPECIES;
1382: PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg);
1383: if (flg) {
1384: PetscInfo(dummy, "num_species set to number of thermal temps provided (%" PetscInt_FMT ")\n",nt);
1385: ctx->num_species = nt;
1386: } else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_thermal_temps t1,t2,... must be provided to set the number of species");
1387: for (ii=0;ii<ctx->num_species;ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */
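   /* (the factor above is 1 keV expressed in Kelvin: 1 eV is about 1.16045e4 K, so 1 keV is about 1.1604525e7 K) */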
1388: nm = LANDAU_MAX_SPECIES-1;
1389: PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg);
1390: if (flg && nm != ctx->num_species-1) {
1391: SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"num ion masses %" PetscInt_FMT " != num species %" PetscInt_FMT "",nm,ctx->num_species-1);
1392: }
1393: nm = LANDAU_MAX_SPECIES;
1394: PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg);
1396: for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */
1397: ctx->masses[0] = 9.10938356e-31; /* electron mass kg (should be about right already) */
1398: ctx->m_0 = ctx->masses[0]; /* arbitrary reference mass, electrons */
1399: nc = LANDAU_MAX_SPECIES-1;
1400: PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg);
1402: for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */
1403: /* geometry and grids */
1404: nt = LANDAU_MAX_GRIDS;
1405: PetscOptionsIntArray("-dm_landau_num_species_grid","Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid","plexland.c", num_species_grid, &nt, &flg);
1406: if (flg) {
1407: ctx->num_grids = nt;
1408: for (ii=nt=0;ii<ctx->num_grids;ii++) nt += num_species_grid[ii];
1410: } else {
1411: ctx->num_grids = 1; // go back to a single grid run
1412: num_species_grid[0] = ctx->num_species;
1413: }
1414: for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids ; ii++) ctx->species_offset[ii+1] = ctx->species_offset[ii] + num_species_grid[ii];
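   /* species_offset[] is a prefix sum of num_species_grid[], so the species of grid g are the contiguous
      range [species_offset[g], species_offset[g+1]); e.g. num_species_grid = {1,2} gives species_offset = {0,1,3} */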
1416: for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1417: int iii = ctx->species_offset[grid]; // normalize with first (arbitrary) species on grid
1418: v0_grid[grid] = PetscSqrtReal(ctx->k*ctx->thermal_temps[iii]/ctx->masses[iii]); /* arbitrary units for non-dimensionalization: mean velocity in 1D of first species on grid */
1419: }
1420: ii = 0;
1421: PetscOptionsInt("-dm_landau_v0_grid", "Index of grid to use for setting v_0 (electrons are default). Not recommended to change", "plexland.c", ii, &ii, NULL);
1422: ctx->v_0 = v0_grid[ii]; /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */
1423: ctx->t_0 = 8*PETSC_PI*PetscSqr(ctx->epsilon0*ctx->m_0/PetscSqr(ctx->charges[0]))/ctx->lnLam/ctx->n_0*PetscPowReal(ctx->v_0,3); /* note, this t_0 makes nu[0,0]=1 */
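   /* written out, the normalization above is t_0 = 8 pi (epsilon0 m_0 / q_0^2)^2 v_0^3 / (lnLambda n_0),
      i.e. the reference (0,0) collision time, chosen so that nu[0,0] = 1 in these units */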
1424: /* domain */
1425: nt = LANDAU_MAX_GRIDS;
1426: PetscOptionsRealArray("-dm_landau_domain_radius","Phase space size in units of thermal velocity of grid","plexland.c",ctx->radius,&nt, &flg);
1428: for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1429: if (flg && ctx->radius[grid] <= 0) { /* nonpositive radius is given as a fraction of the speed of light: 0 means 0.75c, negative means |r|c */
1430: if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75;
1431: else ctx->radius[grid] = -ctx->radius[grid];
1432: ctx->radius[grid] = ctx->radius[grid]*SPEED_OF_LIGHT/ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid)
1433: PetscInfo(dummy, "Change domain radius to %g for grid %" PetscInt_FMT "\n",ctx->radius[grid],grid);
1434: }
1435: ctx->radius[grid] *= v0_grid[grid]/ctx->v_0; // scale domain by thermal radius relative to v_0
1436: }
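   /* illustrative example: "-dm_landau_domain_radius -0.5,6" requests roughly half the speed of light for
      grid 0 and 6 thermal velocities of the first species of grid 1 (both end up in the common v_0 units) */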
1437: /* AMR parameters */
1438: nt = LANDAU_MAX_GRIDS;
1439: PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg);
1441: nt = LANDAU_MAX_GRIDS;
1442: PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg);
1443: for (ii=1;ii<ctx->num_grids;ii++) ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now
1444: PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg);
1445: PetscOptionsInt("-dm_landau_amr_z_refine1", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg);
1446: PetscOptionsInt("-dm_landau_amr_z_refine2", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg);
1447: PetscOptionsReal("-dm_landau_re_radius","velocity range to refine on positive (z>0) r=0 axis for runaways","plexland.c",ctx->re_radius,&ctx->re_radius, &flg);
1448: PetscOptionsReal("-dm_landau_z_radius1","velocity range to refine r=0 axis (for electrons)","plexland.c",ctx->vperp0_radius1,&ctx->vperp0_radius1, &flg);
1449: PetscOptionsReal("-dm_landau_z_radius2","velocity range to refine r=0 axis (for ions) after origin AMR","plexland.c",ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg);
1450: /* spherical domain (not used) */
1451: PetscOptionsInt("-dm_landau_num_sections", "Number of tangential sections in (2D) grid: 2, 3, or 4", "plexland.c", ctx->num_sections, &ctx->num_sections, NULL);
1452: PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, &sph_flg);
1453: PetscOptionsBool("-dm_landau_inflate", "With sphere, inflate for curved edges", "plexland.c", ctx->inflate, &ctx->inflate, &flg);
1454: PetscOptionsReal("-dm_landau_e_radius","Electron thermal velocity, used for circular meshes","plexland.c",ctx->e_radius, &ctx->e_radius, &flg);
1455: if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* an electron radius was given without -dm_landau_sphere; assume a spherical domain was intended */
1456: if (!flg) {
1457: ctx->e_radius = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[0]/ctx->masses[0]/PETSC_PI)/ctx->v_0;
1458: }
1459: nt = LANDAU_MAX_GRIDS;
1460: PetscOptionsRealArray("-dm_landau_i_radius","Ion thermal velocity, used for circular meshes","plexland.c",ctx->i_radius, &nt, &flg);
1461: if (flg && !sph_flg) ctx->sphere = PETSC_TRUE;
1462: if (!flg) {
1463: ctx->i_radius[0] = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[1]/ctx->masses[1]/PETSC_PI)/ctx->v_0; // need to correct for ion grid domain
1464: }
1467: /* processing options */
1468: PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL);
1469: if (ctx->deviceType == LANDAU_CPU || ctx->deviceType == LANDAU_KOKKOS) { // COO assembly is only supported for the CPU and Kokkos back-ends
1470: PetscOptionsBool("-dm_landau_coo_assembly", "Assemble Jacobian with Kokkos on 'device'", "plexland.c", ctx->coo_assembly, &ctx->coo_assembly, NULL);
1472: }
1473: PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL);
1475: PetscOptionsEnd();
1477: for (ii=ctx->num_species;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] = ctx->thermal_temps[ii] = ctx->charges[ii] = 0;
1478: if (ctx->verbose > 0) {
1479: PetscPrintf(ctx->comm, "masses: e=%10.3e; ions in proton mass units: %10.3e %10.3e ...\n",ctx->masses[0],ctx->masses[1]/1.6720e-27,ctx->num_species>2 ? ctx->masses[2]/1.6720e-27 : 0);
1480: PetscPrintf(ctx->comm, "charges: e=%10.3e; charges in elementary units: %10.3e %10.3e\n", ctx->charges[0],-ctx->charges[1]/ctx->charges[0],ctx->num_species>2 ? -ctx->charges[2]/ctx->charges[0] : 0);
1481: PetscPrintf(ctx->comm, "n: e: %10.3e i: %10.3e %10.3e\n", ctx->n[0],ctx->n[1],ctx->num_species>2 ? ctx->n[2] : 0);
1482: PetscPrintf(ctx->comm, "thermal T (K): e=%10.3e i=%10.3e %10.3e. v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e, %s, %s, %" PetscInt_FMT " batched\n", ctx->thermal_temps[0], ctx->thermal_temps[1], (ctx->num_species>2) ? ctx->thermal_temps[2] : 0, ctx->v_0, ctx->v_0/SPEED_OF_LIGHT, ctx->n_0, ctx->t_0, ctx->use_relativistic_corrections ? "relativistic" : "classical", ctx->use_energy_tensor_trick ? "Use trick" : "Intuitive",ctx->batch_sz);
1483: PetscPrintf(ctx->comm, "Domain radius (AMR levels) grid %" PetscInt_FMT ": %10.3e (%" PetscInt_FMT ") ",0,ctx->radius[0],ctx->numAMRRefine[0]);
1484: for (ii=1;ii<ctx->num_grids;ii++) PetscPrintf(ctx->comm, ", %" PetscInt_FMT ": %10.3e (%" PetscInt_FMT ") ",ii,ctx->radius[ii],ctx->numAMRRefine[ii]);
1485: PetscPrintf(ctx->comm,"\n");
1486: if (ctx->jacobian_field_major_order) {
1487: PetscPrintf(ctx->comm,"Using field major order for GPU Jacobian\n");
1488: } else {
1489: PetscPrintf(ctx->comm,"Using default Plex order for all matrices\n");
1490: }
1491: }
1492: DMDestroy(&dummy);
1493: {
1494: PetscMPIInt rank;
1495: MPI_Comm_rank(ctx->comm, &rank);
1496: ctx->stage = 0;
1497: PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13]); /* 13 */
1498: PetscLogEventRegister(" GPU ass. setup", DM_CLASSID, &ctx->events[2]); /* 2 */
1499: PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12]); /* 12 */
1500: PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15]); /* 15 */
1501: PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14]); /* 14 */
1502: PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11]); /* 11 */
1503: PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0]); /* 0 */
1504: PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9]); /* 9 */
1505: PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10]); /* 10 */
1506: PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7]); /* 7 */
1507: PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1]); /* 1 */
1508: PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3]); /* 3 */
1509: PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8]); /* 8 */
1510: PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4]); /* 4 */
1511: PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16]); /* 16 */
1512: PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5]); /* 5 */
1513: PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6]); /* 6 */
1515: if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */
1516: PetscOptionsClearValue(NULL,"-snes_converged_reason");
1517: PetscOptionsClearValue(NULL,"-ksp_converged_reason");
1518: PetscOptionsClearValue(NULL,"-snes_monitor");
1519: PetscOptionsClearValue(NULL,"-ksp_monitor");
1520: PetscOptionsClearValue(NULL,"-ts_monitor");
1521: PetscOptionsClearValue(NULL,"-ts_view");
1522: PetscOptionsClearValue(NULL,"-ts_adapt_monitor");
1523: PetscOptionsClearValue(NULL,"-dm_landau_amr_dm_view");
1524: PetscOptionsClearValue(NULL,"-dm_landau_amr_vec_view");
1525: PetscOptionsClearValue(NULL,"-dm_landau_mass_dm_view");
1526: PetscOptionsClearValue(NULL,"-dm_landau_mass_view");
1527: PetscOptionsClearValue(NULL,"-dm_landau_jacobian_view");
1528: PetscOptionsClearValue(NULL,"-dm_landau_mat_view");
1529: PetscOptionsClearValue(NULL,"-pc_bjkokkos_ksp_converged_reason");
1530: PetscOptionsClearValue(NULL,"-pc_bjkokkos_ksp_monitor");
1531: PetscOptionsClearValue(NULL,"-");
1532: PetscOptionsClearValue(NULL,"-info");
1533: }
1534: }
1535: return 0;
1536: }
1538: static PetscErrorCode CreateStaticGPUData(PetscInt dim, IS grid_batch_is_inv[], LandauCtx *ctx)
1539: {
1540: PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
1541: PetscQuadrature quad;
1542: const PetscReal *quadWeights;
1543: PetscInt numCells[LANDAU_MAX_GRIDS],Nq,Nf[LANDAU_MAX_GRIDS], ncellsTot=0;
1544: PetscTabulation *Tf;
1545: PetscDS prob;
1547: DMGetDS(ctx->plex[0], &prob); // same DS for all grids
1548: PetscDSGetTabulation(prob, &Tf); // Bf, &Df same for all grids
1549: /* DS, Tab and quad are the same on all grids */
1551: PetscFEGetQuadrature(ctx->fe[0], &quad);
1552: PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, &quadWeights);
1554: /* setup each grid */
1555: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
1556: PetscInt cStart, cEnd;
1558: DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);
1559: numCells[grid] = cEnd - cStart; // grids can have different topology
1560: DMGetLocalSection(ctx->plex[grid], &section[grid]);
1561: DMGetGlobalSection(ctx->plex[grid], &globsection[grid]);
1562: PetscSectionGetNumFields(section[grid], &Nf[grid]);
1563: ncellsTot += numCells[grid];
1564: }
1565: #define MAP_BF_SIZE (64*LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_Q_FACE*LANDAU_MAX_SPECIES)
1566: /* create GPU assembly data */
1567: if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1568: PetscContainer container;
1569: PetscScalar elemMatrix[LANDAU_MAX_NQ*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_MAX_SPECIES], *elMat;
1570: pointInterpolationP4est pointMaps[MAP_BF_SIZE][LANDAU_MAX_Q_FACE];
1571: P4estVertexMaps *maps;
1572: const PetscInt *plex_batch=NULL,Nb=Nq; // tensor elements;
1573: LandauIdx *coo_elem_offsets=NULL, *coo_elem_fullNb=NULL, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = NULL;
1574: /* create GPU assembly data */
1575: PetscInfo(ctx->plex[0], "Make GPU maps %d\n",1);
1576: PetscLogEventBegin(ctx->events[2],0,0,0,0);
1577: PetscMalloc(sizeof(*maps)*ctx->num_grids, &maps);
1579: if (ctx->coo_assembly) { // setup COO assembly -- put COO metadata directly in ctx->SData_d
1580: PetscMalloc3(ncellsTot+1,&coo_elem_offsets,ncellsTot,&coo_elem_fullNb,ncellsTot, &coo_elem_point_offsets); // array of integer pointers
1581: coo_elem_offsets[0] = 0; // finish later
1582: PetscInfo(ctx->plex[0], "COO initialization, %" PetscInt_FMT " cells\n",ncellsTot);
1583: ctx->SData_d.coo_n_cellsTot = ncellsTot;
1584: ctx->SData_d.coo_elem_offsets = (void*)coo_elem_offsets;
1585: ctx->SData_d.coo_elem_fullNb = (void*)coo_elem_fullNb;
1586: ctx->SData_d.coo_elem_point_offsets = (void*)coo_elem_point_offsets;
1587: } else {
1588: ctx->SData_d.coo_elem_offsets = ctx->SData_d.coo_elem_fullNb = NULL;
1589: ctx->SData_d.coo_elem_point_offsets = NULL;
1590: ctx->SData_d.coo_n_cellsTot = 0;
1591: }
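     /* COO metadata layout (filled in below): coo_elem_offsets[e] is the start of element e's block in the
        COO index arrays (fullNb^2 entries per species), coo_elem_fullNb[e] is the number of contributing
        global points (rows/columns) per species of element e, and coo_elem_point_offsets[e][f] is the
        running count of rows contributed by vertices 0..f-1 of element e */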
1593: ctx->SData_d.coo_max_fullnb = 0;
1594: for (PetscInt grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1595: PetscInt cStart, cEnd, Nfloc = Nf[grid], totDim = Nfloc*Nq;
1596: if (grid_batch_is_inv[grid]) {
1597: ISGetIndices(grid_batch_is_inv[grid], &plex_batch);
1598: }
1599: DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);
1600: // make maps
1601: maps[grid].d_self = NULL;
1602: maps[grid].num_elements = numCells[grid];
1603: maps[grid].num_face = (PetscInt)(pow(Nq,1./((double)dim))+.001); // Q
1604: maps[grid].num_face = (PetscInt)(pow(maps[grid].num_face,(double)(dim-1))+.001); // Q^2
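       /* for tensor-product elements Nq = Q^dim, so the two lines above recover Q and then set
          num_face = Q^(dim-1), the number of interpolation points on an element face */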
1605: maps[grid].num_reduced = 0;
1606: maps[grid].deviceType = ctx->deviceType;
1607: maps[grid].numgrids = ctx->num_grids;
1608: // count reduced and get
1609: PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx);
1610: for (int ej = cStart, eidx = 0 ; ej < cEnd; ++ej, ++eidx, glb_elem_idx++) {
1611: if (coo_elem_offsets) coo_elem_offsets[glb_elem_idx+1] = coo_elem_offsets[glb_elem_idx]; // start with last one, then add
1612: for (int fieldA=0;fieldA<Nf[grid];fieldA++) {
1613: int fullNb = 0;
1614: for (int q = 0; q < Nb; ++q) {
1615: PetscInt numindices,*indices;
1616: PetscScalar *valuesOrig = elMat = elemMatrix;
1617: PetscArrayzero(elMat, totDim*totDim);
1618: elMat[ (fieldA*Nb + q)*totDim + fieldA*Nb + q] = 1;
1619: DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);
1620: for (PetscInt f = 0 ; f < numindices ; ++f) { // look for a non-zero on the diagonal
1621: if (PetscAbs(PetscRealPart(elMat[f*numindices + f])) > PETSC_MACHINE_EPSILON) {
1622: // found it
1623: if (PetscAbs(PetscRealPart(elMat[f*numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0
1624: if (plex_batch) {
1625: maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx) plex_batch[indices[f]];
1626: } else {
1627: maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)indices[f];
1628: }
1629: fullNb++;
1630: } else { //found a constraint
1631: int jj = 0;
1632: PetscReal sum = 0;
1633: const PetscInt ff = f;
1634: maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1
1636: do { // constraints are continuous in Plex - exploit that here
1637: int ii; // get 'scale'
1638: for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value
1639: if (ff + ii < numindices) { // 3D has Q and Q^2 interps so might run off end. We could test that elMat[f*numindices + ff + ii] > 0, and break if not
1640: pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f*numindices + ff + ii]);
1641: }
1642: }
1643: sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic
1644: // get 'gid'
1645: if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps
1646: else {
1647: if (plex_batch) {
1648: pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]];
1649: } else {
1650: pointMaps[maps[grid].num_reduced][jj].gid = indices[f];
1651: }
1652: fullNb++;
1653: }
1654: } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end
1655: while (jj < maps[grid].num_face) {
1656: pointMaps[maps[grid].num_reduced][jj].scale = 0;
1657: pointMaps[maps[grid].num_reduced][jj].gid = -1;
1658: jj++;
1659: }
1660: if (PetscAbs(sum-1.0) > 10*PETSC_MACHINE_EPSILON) { // debug
1661: int d,f;
1662: PetscReal tmp = 0;
1663: PetscPrintf(PETSC_COMM_SELF,"\t\t%" PetscInt_FMT ".%" PetscInt_FMT ".%" PetscInt_FMT ") ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%" PetscInt_FMT ")\n",eidx,q,fieldA,sum,LANDAU_MAX_Q_FACE,maps[grid].num_face);
1664: for (d = 0, tmp = 0; d < numindices; ++d) {
1665: if (tmp!=0 && PetscAbs(tmp-1.0) > 10*PETSC_MACHINE_EPSILON) PetscPrintf(PETSC_COMM_WORLD,"%3" PetscInt_FMT ") %3" PetscInt_FMT ": ",d,indices[d]);
1666: for (f = 0; f < numindices; ++f) {
1667: tmp += PetscRealPart(elMat[d*numindices + f]);
1668: }
1669: if (tmp!=0) PetscPrintf(ctx->comm," | %22.16e\n",tmp);
1670: }
1671: }
1672: maps[grid].num_reduced++;
1674: }
1675: break;
1676: }
1677: }
1678: // cleanup
1679: DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);
1680: if (elMat != valuesOrig) DMRestoreWorkArray(ctx->plex[grid], numindices*numindices, MPIU_SCALAR, &elMat);
1681: }
1682: if (ctx->coo_assembly) { // setup COO assembly
1683: coo_elem_offsets[glb_elem_idx+1] += fullNb*fullNb; // one species block, adds a block for each species, on this element in this grid
1684: if (fieldA==0) { // cache full Nb for this element, on this grid per species
1685: coo_elem_fullNb[glb_elem_idx] = fullNb;
1686: if (fullNb>ctx->SData_d.coo_max_fullnb) ctx->SData_d.coo_max_fullnb = fullNb;
1688: }
1689: } // field
1690: } // cell
1691: // allocate and copy point data maps[grid].gIdx[eidx][field][q]
1692: PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps);
1693: for (int ej = 0; ej < maps[grid].num_reduced; ++ej) {
1694: for (int q = 0; q < maps[grid].num_face; ++q) {
1695: maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale;
1696: maps[grid].c_maps[ej][q].gid = pointMaps[ej][q].gid;
1697: }
1698: }
1699: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1700: if (ctx->deviceType == LANDAU_KOKKOS) {
1701: LandauKokkosCreateMatMaps(maps, pointMaps, Nf, Nq, grid); // implies Kokkos does it
1702: } // else could be CUDA
1703: #endif
1704: #if defined(PETSC_HAVE_CUDA)
1705: if (ctx->deviceType == LANDAU_CUDA) {
1706: LandauCUDACreateMatMaps(maps, pointMaps, Nf, Nq, grid);
1707: }
1708: #endif
1709: if (plex_batch) {
1710: ISRestoreIndices(grid_batch_is_inv[grid], &plex_batch);
1711: ISDestroy(&grid_batch_is_inv[grid]); // we are done with this
1712: }
1713: } /* grids */
1714: // finish COO
1715: if (ctx->coo_assembly) { // setup COO assembly
1716: PetscInt *oor, *ooc;
1717: ctx->SData_d.coo_size = coo_elem_offsets[ncellsTot]*ctx->batch_sz;
1718: PetscMalloc2(ctx->SData_d.coo_size,&oor,ctx->SData_d.coo_size,&ooc);
1719: for (int i=0;i<ctx->SData_d.coo_size;i++) oor[i] = ooc[i] = -1;
1720: // pass 1: count the rows/columns each vertex contributes (fill coo_elem_point_offsets)
1721: for (int grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1722: for (int ej = 0 ; ej < numCells[grid] ; ++ej, glb_elem_idx++) {
1723: const int fullNb = coo_elem_fullNb[glb_elem_idx];
1724: const LandauIdx *const Idxs = &maps[grid].gIdx[ej][0][0]; // just use the field-0 maps; they should be the same, and this is only for COO storage
1725: coo_elem_point_offsets[glb_elem_idx][0] = 0;
1726: for (int f=0, cnt2=0;f<Nb;f++) {
1727: int idx = Idxs[f];
1728: coo_elem_point_offsets[glb_elem_idx][f+1] = coo_elem_point_offsets[glb_elem_idx][f]; // start at last
1729: if (idx >= 0) {
1730: cnt2++;
1731: coo_elem_point_offsets[glb_elem_idx][f+1]++; // inc
1732: } else {
1733: idx = -idx - 1;
1734: for (int q = 0 ; q < maps[grid].num_face; q++) {
1735: if (maps[grid].c_maps[idx][q].gid < 0) break;
1736: cnt2++;
1737: coo_elem_point_offsets[glb_elem_idx][f+1]++; // inc
1738: }
1739: }
1741: }
1743: }
1744: }
1745: // pass 2: fill the COO row and column index arrays
1746: for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
1747: for (int grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1748: const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
1749: for (int ej = 0 ; ej < numCells[grid] ; ++ej, glb_elem_idx++) {
1750: const int fullNb = coo_elem_fullNb[glb_elem_idx],fullNb2=fullNb*fullNb;
1751: // set (i,j)
1752: for (int fieldA=0;fieldA<Nf[grid];fieldA++) {
1753: const LandauIdx *const Idxs = &maps[grid].gIdx[ej][fieldA][0];
1754: int rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE];
1755: for (int f = 0; f < Nb; ++f) {
1756: const int nr = coo_elem_point_offsets[glb_elem_idx][f+1] - coo_elem_point_offsets[glb_elem_idx][f];
1757: if (nr==1) rows[0] = Idxs[f];
1758: else {
1759: const int idx = -Idxs[f] - 1;
1760: for (int q = 0; q < nr; q++) {
1761: rows[q] = maps[grid].c_maps[idx][q].gid;
1762: }
1763: }
1764: for (int g = 0; g < Nb; ++g) {
1765: const int nc = coo_elem_point_offsets[glb_elem_idx][g+1] - coo_elem_point_offsets[glb_elem_idx][g];
1766: if (nc==1) cols[0] = Idxs[g];
1767: else {
1768: const int idx = -Idxs[g] - 1;
1769: for (int q = 0; q < nc; q++) {
1770: cols[q] = maps[grid].c_maps[idx][q].gid;
1771: }
1772: }
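                 /* index into the flat COO arrays: batches are blocked first (stride coo_elem_offsets[ncellsTot]),
                    then elements (coo_elem_offsets), then species blocks of size fullNb^2; within a species block
                    the rows of vertex f start at fullNb*coo_elem_point_offsets[..][f] and the (f,g) sub-block
                    starts a further nr*coo_elem_point_offsets[..][g] in */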
1773: const int idx0 = b_id*coo_elem_offsets[ncellsTot] + coo_elem_offsets[glb_elem_idx] + fieldA*fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
1774: for (int q = 0, idx = idx0; q < nr; q++) {
1775: for (int d = 0; d < nc; d++, idx++) {
1776: oor[idx] = rows[q] + moffset;
1777: ooc[idx] = cols[d] + moffset;
1778: }
1779: }
1780: }
1781: }
1782: }
1783: } // cell
1784: } // grid
1785: } // batch
1786: MatSetPreallocationCOO(ctx->J,ctx->SData_d.coo_size,oor,ooc);
1787: PetscFree2(oor,ooc);
1788: }
1789: PetscContainerCreate(PETSC_COMM_SELF, &container);
1790: PetscContainerSetPointer(container, (void *)maps);
1791: PetscContainerSetUserDestroy(container, LandauGPUMapsDestroy);
1792: PetscObjectCompose((PetscObject) ctx->J, "assembly_maps", (PetscObject) container);
1793: PetscContainerDestroy(&container);
1794: PetscLogEventEnd(ctx->events[2],0,0,0,0);
1795: } // end GPU assembly
1796: { /* create static point data, Jacobian called first, only one vertex copy */
1797: PetscReal *invJe,*ww,*xx,*yy,*zz=NULL,*invJ_a;
1798: PetscInt outer_ipidx, outer_ej,grid, nip_glb = 0;
1799: PetscFE fe;
1800: const PetscInt Nb = Nq;
1801: PetscLogEventBegin(ctx->events[7],0,0,0,0);
1802: PetscInfo(ctx->plex[0], "Initialize static data\n");
1803: for (PetscInt grid=0;grid<ctx->num_grids;grid++) nip_glb += Nq*numCells[grid];
1804: /* collect f data, first time is for Jacobian, but make mass now */
1805: if (ctx->verbose > 0) {
1806: PetscInt ncells = 0, N;
1807: MatGetSize(ctx->J,&N,NULL);
1808: for (PetscInt grid=0;grid<ctx->num_grids;grid++) ncells += numCells[grid];
1809: PetscPrintf(ctx->comm,"%" PetscInt_FMT ") %s %" PetscInt_FMT " IPs, %" PetscInt_FMT " cells total, Nb=%" PetscInt_FMT ", Nq=%" PetscInt_FMT ", dim=%" PetscInt_FMT ", Tab: Nb=%" PetscInt_FMT " Nf=%" PetscInt_FMT " Np=%" PetscInt_FMT " cdim=%" PetscInt_FMT " N=%" PetscInt_FMT "\n",0,"FormLandau",nip_glb,ncells, Nb, Nq, dim, Nb, ctx->num_species, Nb, dim, N);
1810: }
1811: PetscMalloc4(nip_glb,&ww,nip_glb,&xx,nip_glb,&yy,nip_glb*dim*dim,&invJ_a);
1812: if (dim==3) {
1813: PetscMalloc1(nip_glb,&zz);
1814: }
1815: if (ctx->use_energy_tensor_trick) {
1816: PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &fe);
1817: PetscObjectSetName((PetscObject) fe, "energy");
1818: }
1819: /* initialize each grid's static data - no batching here */
1820: for (grid=0, outer_ipidx=0, outer_ej=0 ; grid < ctx->num_grids ; grid++) { // OpenMP (once)
1821: Vec v2_2 = NULL; // projected function: v^2/2 for non-relativistic, gamma... for relativistic
1822: PetscSection e_section;
1823: DM dmEnergy;
1824: PetscInt cStart, cEnd, ej;
1826: DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);
1827: // prep energy trick, get v^2 / 2 vector
1828: if (ctx->use_energy_tensor_trick) {
1829: PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {ctx->use_relativistic_corrections ? gamma_m1_f : energy_f};
1830: Vec glob_v2;
1831: PetscReal *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))};
1833: DMClone(ctx->plex[grid], &dmEnergy);
1834: PetscObjectSetName((PetscObject) dmEnergy, "energy");
1835: DMSetField(dmEnergy, 0, NULL, (PetscObject)fe);
1836: DMCreateDS(dmEnergy);
1837: DMGetSection(dmEnergy, &e_section);
1838: DMGetGlobalVector(dmEnergy,&glob_v2);
1839: PetscObjectSetName((PetscObject) glob_v2, "trick");
1840: c2_0[0] = &data[0];
1841: DMProjectFunction(dmEnergy, 0., energyf, (void**)c2_0, INSERT_ALL_VALUES, glob_v2);
1842: DMGetLocalVector(dmEnergy, &v2_2);
1843: VecZeroEntries(v2_2); /* zero BCs so don't set */
1844: DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2);
1845: DMGlobalToLocalEnd (dmEnergy, glob_v2, INSERT_VALUES, v2_2);
1846: DMViewFromOptions(dmEnergy,NULL, "-energy_dm_view");
1847: VecViewFromOptions(glob_v2,NULL, "-energy_vec_view");
1848: DMRestoreGlobalVector(dmEnergy, &glob_v2);
1849: }
1850: /* append part of the IP data for each grid */
1851: for (ej = 0 ; ej < numCells[grid]; ++ej, ++outer_ej) {
1852: PetscScalar *coefs = NULL;
1853: PetscReal vj[LANDAU_MAX_NQ*LANDAU_DIM],detJj[LANDAU_MAX_NQ], Jdummy[LANDAU_MAX_NQ*LANDAU_DIM*LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0);
1854: invJe = invJ_a + outer_ej*Nq*dim*dim;
1855: DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej+cStart, quad, vj, Jdummy, invJe, detJj);
1856: if (ctx->use_energy_tensor_trick) {
1857: DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs);
1858: }
1859: /* create static point data */
1860: for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) {
1861: const PetscInt gidx = outer_ipidx;
1862: const PetscReal *invJ = &invJe[qj*dim*dim];
1863: ww [gidx] = detJj[qj] * quadWeights[qj];
1864: if (dim==2) ww [gidx] *= vj[qj * dim + 0]; /* cylindrical coordinate, w/o 2pi */
1865: // get xx, yy, zz
1866: if (ctx->use_energy_tensor_trick) {
1867: double refSpaceDer[3],eGradPhi[3];
1868: const PetscReal * const DD = Tf[0]->T[1];
1869: const PetscReal *Dq = &DD[qj*Nb*dim];
1870: for (int d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0;
1871: for (int b = 0; b < Nb; ++b) {
1872: for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b*dim+d]*PetscRealPart(coefs[b]);
1873: }
1874: xx[gidx] = 1e10;
1875: if (ctx->use_relativistic_corrections) {
1876: double dg2_c2 = 0;
1877: //for (int d = 0; d < dim; ++d) refSpaceDer[d] *= c02;
1878: for (int d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]);
1879: dg2_c2 *= (double)c02;
1880: if (dg2_c2 >= .999) {
1881: xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1882: yy[gidx] = vj[qj * dim + 1];
1883: if (dim==3) zz[gidx] = vj[qj * dim + 2];
1884: PetscPrintf(ctx->comm,"Error: %12.5e %" PetscInt_FMT ".%" PetscInt_FMT ") dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n",PetscSqrtReal(xx[gidx]*xx[gidx] + yy[gidx]*yy[gidx] + zz[gidx]*zz[gidx]), ej, qj, dg2_c2, xx[gidx],yy[gidx],zz[gidx]);
1885: } else {
1886: PetscReal fact = c02/PetscSqrtReal(1. - dg2_c2);
1887: for (int d = 0; d < dim; ++d) refSpaceDer[d] *= fact;
1888: // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0
1889: }
1890: }
1891: if (xx[gidx] == 1e10) {
1892: for (int d = 0; d < dim; ++d) {
1893: for (int e = 0 ; e < dim; ++e) {
1894: eGradPhi[d] += invJ[e*dim+d]*refSpaceDer[e];
1895: }
1896: }
1897: xx[gidx] = eGradPhi[0];
1898: yy[gidx] = eGradPhi[1];
1899: if (dim==3) zz[gidx] = eGradPhi[2];
1900: }
1901: } else {
1902: xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1903: yy[gidx] = vj[qj * dim + 1];
1904: if (dim==3) zz[gidx] = vj[qj * dim + 2];
1905: }
1906: } /* q */
1907: if (ctx->use_energy_tensor_trick) {
1908: DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs);
1909: }
1910: } /* ej */
1911: if (ctx->use_energy_tensor_trick) {
1912: DMRestoreLocalVector(dmEnergy, &v2_2);
1913: DMDestroy(&dmEnergy);
1914: }
1915: } /* grid */
1916: if (ctx->use_energy_tensor_trick) {
1917: PetscFEDestroy(&fe);
1918: }
1919: /* cache static data */
1920: if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
1921: #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_KOKKOS_KERNELS)
1922: PetscReal invMass[LANDAU_MAX_SPECIES],nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
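     /* per-species normalizations passed to the device kernels: invMass = m_0/m_s, nu_alpha = (q_s/m_0)^2 m_0/m_s,
        and nu_beta = (q_s/epsilon0)^2 lnLambda/(8 pi) * t_0 n_0 / v_0^3, exactly as computed in the loop below */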
1923: for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1924: for (PetscInt ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++) {
1925: invMass[ii] = ctx->m_0/ctx->masses[ii];
1926: nu_alpha[ii] = PetscSqr(ctx->charges[ii]/ctx->m_0)*ctx->m_0/ctx->masses[ii];
1927: nu_beta[ii] = PetscSqr(ctx->charges[ii]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
1928: }
1929: }
1930: if (ctx->deviceType == LANDAU_CUDA) {
1931: #if defined(PETSC_HAVE_CUDA)
1932: PetscCall(LandauCUDAStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset,
1933: nu_alpha, nu_beta, invMass, invJ_a, xx, yy, zz, ww, &ctx->SData_d));
1934: #else
1935: SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type cuda not built");
1936: #endif
1937: } else if (ctx->deviceType == LANDAU_KOKKOS) {
1938: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1939: PetscCall(LandauKokkosStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset,
1940: nu_alpha, nu_beta, invMass,invJ_a,xx,yy,zz,ww,&ctx->SData_d));
1941: #else
1942: SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type kokkos not built");
1943: #endif
1944: }
1945: #endif
1946: /* free */
1947: PetscFree4(ww,xx,yy,invJ_a);
1948: if (dim==3) {
1949: PetscFree(zz);
1950: }
1951: } else { /* CPU version, just copy in, only use part */
1952: ctx->SData_d.w = (void*)ww;
1953: ctx->SData_d.x = (void*)xx;
1954: ctx->SData_d.y = (void*)yy;
1955: ctx->SData_d.z = (void*)zz;
1956: ctx->SData_d.invJ = (void*)invJ_a;
1957: }
1958: PetscLogEventEnd(ctx->events[7],0,0,0,0);
1959: } // initialize
1960: return 0;
1961: }
1963: /* < v, u > */
1964: static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1965: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1966: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1967: PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1968: {
1969: g0[0] = 1.;
1970: }
1972: /* < v, u > */
1973: static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1974: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1975: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1976: PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1977: {
1978: static double ttt = 1;
1979: g0[0] = ttt++;
1980: }
1982: /* < v, u > */
1983: static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1984: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1985: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1986: PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1987: {
1988: g0[0] = 2.*PETSC_PI*x[0];
1989: }
1991: static PetscErrorCode MatrixNfDestroy(void *ptr)
1992: {
1993: PetscInt *nf = (PetscInt *)ptr;
1994: PetscFree(nf);
1995: return 0;
1996: }
1998: static PetscErrorCode LandauCreateMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx)
1999: {
2000: PetscInt *idxs=NULL;
2001: Mat subM[LANDAU_MAX_GRIDS];
2003: if (!ctx->gpu_assembly) { /* nothing to set up here without GPU assembly */
2004: return 0;
2005: }
2006: // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is'
2007: if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2008: PetscMalloc1(ctx->mat_offset[ctx->num_grids]*ctx->batch_sz, &idxs);
2009: }
2010: for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2011: const PetscInt *values, n = ctx->mat_offset[grid+1] - ctx->mat_offset[grid];
2012: Mat gMat;
2013: DM massDM;
2014: PetscDS prob;
2015: Vec tvec;
2016: // get "mass" matrix for reordering
2017: DMClone(ctx->plex[grid], &massDM);
2018: DMCopyFields(ctx->plex[grid], massDM);
2019: DMCreateDS(massDM);
2020: DMGetDS(massDM, &prob);
2021: for (int ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) {
2022: PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL);
2023: }
2024: PetscOptionsInsertString(NULL,"-dm_preallocate_only");
2025: DMSetFromOptions(massDM);
2026: DMCreateMatrix(massDM, &gMat);
2027: PetscOptionsInsertString(NULL,"-dm_preallocate_only false");
2028: MatSetOption(gMat,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
2029: MatSetOption(gMat,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE);
2030: DMCreateLocalVector(ctx->plex[grid],&tvec);
2031: DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx);
2032: MatViewFromOptions(gMat, NULL, "-dm_landau_reorder_mat_view");
2033: DMDestroy(&massDM);
2034: VecDestroy(&tvec);
2035: subM[grid] = gMat;
2036: if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2037: MatOrderingType rtype = MATORDERINGRCM;
2038: IS isrow,isicol;
2039: MatGetOrdering(gMat,rtype,&isrow,&isicol);
2040: ISInvertPermutation(isrow,PETSC_DECIDE,&grid_batch_is_inv[grid]);
2041: ISGetIndices(isrow, &values);
2042: for (PetscInt b_id=0 ; b_id < ctx->batch_sz ; b_id++) { // add batch size DMs for this species grid
2043: #if !defined(LANDAU_SPECIES_MAJOR)
2044: PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id*N;
2045: for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0;
2046: #else
2047: PetscInt n0 = ctx->mat_offset[grid]*ctx->batch_sz + b_id*n;
2048: for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0;
2049: #endif
2050: }
2051: ISRestoreIndices(isrow, &values);
2052: ISDestroy(&isrow);
2053: ISDestroy(&isicol);
2054: }
2055: }
2056: if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2057: ISCreateGeneral(comm,ctx->mat_offset[ctx->num_grids]*ctx->batch_sz,idxs,PETSC_OWN_POINTER,&ctx->batch_is);
2058: }
2059: // get a block matrix
2060: for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2061: Mat B = subM[grid];
2062: PetscInt nloc, nzl, colbuf[1024], row;
2063: MatGetSize(B, &nloc, NULL);
2064: for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2065: const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2066: const PetscInt *cols;
2067: const PetscScalar *vals;
2068: for (int i=0 ; i<nloc ; i++) {
2069: MatGetRow(B,i,&nzl,&cols,&vals);
2071: for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset;
2072: row = i + moffset;
2073: MatSetValues(ctx->J,1,&row,nzl,colbuf,vals,INSERT_VALUES);
2074: MatRestoreRow(B,i,&nzl,&cols,&vals);
2075: }
2076: }
2077: }
2078: for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2079: MatDestroy(&subM[grid]);
2080: }
2081: MatAssemblyBegin(ctx->J,MAT_FINAL_ASSEMBLY);
2082: MatAssemblyEnd(ctx->J,MAT_FINAL_ASSEMBLY);
2084: if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2085: Mat mat_block_order;
2086: MatCreateSubMatrix(ctx->J,ctx->batch_is,ctx->batch_is,MAT_INITIAL_MATRIX,&mat_block_order); // use MatPermute
2087: MatViewFromOptions(mat_block_order, NULL, "-dm_landau_field_major_mat_view");
2088: MatDestroy(&ctx->J);
2089: ctx->J = mat_block_order;
2090: // override ops to make KSP work in field major space
2091: ctx->seqaij_mult = mat_block_order->ops->mult;
2092: mat_block_order->ops->mult = LandauMatMult;
2093: mat_block_order->ops->multadd = LandauMatMultAdd;
2094: ctx->seqaij_solve = NULL;
2095: ctx->seqaij_getdiagonal = mat_block_order->ops->getdiagonal;
2096: mat_block_order->ops->getdiagonal = LandauMatGetDiagonal;
2097: ctx->seqaij_multtranspose = mat_block_order->ops->multtranspose;
2098: mat_block_order->ops->multtranspose = LandauMatMultTranspose;
2099: VecDuplicate(X,&ctx->work_vec);
2100: VecScatterCreate(X, ctx->batch_is, ctx->work_vec, NULL, &ctx->plex_batch);
2101: }
2103: return 0;
2104: }
2106: PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat);
2107: /*@C
2108: DMPlexLandauCreateVelocitySpace - Create a DMPlex velocity space mesh
2110: Collective on comm
2112: Input Parameters:
2113: + comm - The MPI communicator
2114: . dim - velocity space dimension (2 for axisymmetric, 3 for full 3X + 3V solver)
2115: - prefix - prefix for options (not tested)
2117: Output Parameters:
2118: + pack - The DM object representing the mesh
2119: . X - A vector (user destroys)
2120: - J - Optional matrix (object destroys)
2122: Level: beginner
2124: .keywords: mesh
2125: .seealso: DMPlexCreate(), DMPlexLandauDestroyVelocitySpace()
2126: @*/
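/*
   A minimal, illustrative calling sequence (assuming the TS driver pattern of the Landau examples, e.g. ex2.c,
   which pair this with DMPlexLandauIFunction()/DMPlexLandauIJacobian()):

     Vec X; Mat J; DM pack;
     DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, 2, "", &X, &J, &pack);
     // ... create a TS on 'pack', set the I-function/I-Jacobian above, TSSolve(ts, X) ...
     DMPlexLandauDestroyVelocitySpace(&pack);
     VecDestroy(&X); // X is destroyed by the user, J by the object (see above)
*/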
2127: PetscErrorCode DMPlexLandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack)
2128: {
2129: LandauCtx *ctx;
2130: Vec Xsub[LANDAU_MAX_GRIDS];
2131: IS grid_batch_is_inv[LANDAU_MAX_GRIDS];
2135: PetscNew(&ctx);
2136: ctx->comm = comm; /* used for diagnostics and global errors */
2137: /* process options */
2138: ProcessOptions(ctx,prefix);
2139: if (dim==2) ctx->use_relativistic_corrections = PETSC_FALSE;
2140: /* Create Mesh */
2141: DMCompositeCreate(PETSC_COMM_SELF,pack);
2142: PetscLogEventBegin(ctx->events[13],0,0,0,0);
2143: PetscLogEventBegin(ctx->events[15],0,0,0,0);
2144: LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack); // creates grids (Forest of AMR)
2145: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2146: /* create FEM */
2147: SetupDS(ctx->plex[grid],dim,grid,ctx);
2148: /* set initial state */
2149: DMCreateGlobalVector(ctx->plex[grid],&Xsub[grid]);
2150: PetscObjectSetName((PetscObject) Xsub[grid], "u_orig");
2151: /* initial static refinement, no solve */
2152: LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, ctx);
2153: /* forest refinement - forest goes in (if forest), plex comes out */
2154: if (ctx->use_p4est) {
2155: DM plex;
2156: adapt(grid,ctx,&Xsub[grid]); // forest goes in, plex comes out
2157: DMViewFromOptions(ctx->plex[grid],NULL,"-dm_landau_amr_dm_view"); // need to differentiate - todo
2158: VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view");
2159: // convert to plex, all done with this level
2160: DMConvert(ctx->plex[grid], DMPLEX, &plex);
2161: DMDestroy(&ctx->plex[grid]);
2162: ctx->plex[grid] = plex;
2163: }
2164: #if !defined(LANDAU_SPECIES_MAJOR)
2165: DMCompositeAddDM(*pack,ctx->plex[grid]);
2166: #else
2167: for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid
2168: DMCompositeAddDM(*pack,ctx->plex[grid]);
2169: }
2170: #endif
2171: DMSetApplicationContext(ctx->plex[grid], ctx);
2172: }
2173: #if !defined(LANDAU_SPECIES_MAJOR)
2174: // stack the remaining batched DMs (the b_id=0 copies were added in the grid loop above)
2175: for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) {
2176: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2177: DMCompositeAddDM(*pack,ctx->plex[grid]);
2178: }
2179: }
2180: #endif
2181: // create ctx->mat_offset
2182: ctx->mat_offset[0] = 0;
2183: for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2184: PetscInt n;
2185: VecGetLocalSize(Xsub[grid],&n);
2186: ctx->mat_offset[grid+1] = ctx->mat_offset[grid] + n;
2187: }
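   /* mat_offset[] is thus a prefix sum of the per-grid (single-batch) vector sizes; together with a batch index
      it locates each grid's block in the packed vector/matrix via LAND_MOFFSET() */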
2188: // create DM & Jacobian
2189: DMSetApplicationContext(*pack, ctx);
2190: PetscOptionsInsertString(NULL,"-dm_preallocate_only");
2191: DMSetFromOptions(*pack);
2192: DMCreateMatrix(*pack, &ctx->J);
2193: PetscOptionsInsertString(NULL,"-dm_preallocate_only false");
2194: MatSetOption(ctx->J,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
2195: MatSetOption(ctx->J,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE);
2196: PetscObjectSetName((PetscObject)ctx->J, "Jac");
2197: // construct initial conditions in X
2198: DMCreateGlobalVector(*pack,X);
2199: for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2200: PetscInt n;
2201: VecGetLocalSize(Xsub[grid],&n);
2202: for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2203: PetscScalar const *values;
2204: const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2205: LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx);
2206: VecGetArrayRead(Xsub[grid],&values);
2207: for (int i=0, idx = moffset; i<n; i++, idx++) {
2208: VecSetValue(*X,idx,values[i],INSERT_VALUES);
2209: }
2210: VecRestoreArrayRead(Xsub[grid],&values);
2211: }
2212: }
2213: // cleanup
2214: for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2215: VecDestroy(&Xsub[grid]);
2216: }
2217: /* check for correct matrix type */
2218: if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
2219: PetscBool flg;
2220: if (ctx->deviceType == LANDAU_CUDA) {
2221: PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,MATAIJCUSPARSE,"");
2223: } else if (ctx->deviceType == LANDAU_KOKKOS) {
2224: PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,MATAIJKOKKOS,"");
2225: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2227: #else
2229: #endif
2230: }
2231: }
2232: PetscLogEventEnd(ctx->events[15],0,0,0,0);
2233: // create field major ordering
2235: ctx->work_vec = NULL;
2236: ctx->plex_batch = NULL;
2237: ctx->batch_is = NULL;
2238: for (int i=0;i<LANDAU_MAX_GRIDS;i++) grid_batch_is_inv[i] = NULL;
2239: PetscLogEventBegin(ctx->events[12],0,0,0,0);
2240: LandauCreateMatrix(comm, *X, grid_batch_is_inv, ctx);
2241: PetscLogEventEnd(ctx->events[12],0,0,0,0);
2243: // create AMR GPU assembly maps and static GPU data
2244: CreateStaticGPUData(dim,grid_batch_is_inv,ctx);
2246: PetscLogEventEnd(ctx->events[13],0,0,0,0);
2248: // create mass matrix
2249: DMPlexLandauCreateMassMatrix(*pack, NULL);
2251: if (J) *J = ctx->J;
2253: if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2254: PetscContainer container;
2255: // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc -dm_landau_jacobian_field_major_order
2256: PetscContainerCreate(PETSC_COMM_SELF, &container);
2257: PetscContainerSetPointer(container, (void *)ctx);
2258: PetscObjectCompose((PetscObject) ctx->J, "LandauCtx", (PetscObject) container);
2259: PetscContainerDestroy(&container);
2260: // batch solvers need this ordering map to work in field-major space
2261: PetscContainerCreate(PETSC_COMM_SELF, &container);
2262: PetscContainerSetPointer(container, (void *)ctx->plex_batch);
2263: PetscObjectCompose((PetscObject) ctx->J, "plex_batch_is", (PetscObject) container);
2264: PetscContainerDestroy(&container);
2265: }
2266: // for batch solvers
2267: {
2268: PetscContainer container;
2269: PetscInt *pNf;
2270: PetscContainerCreate(PETSC_COMM_SELF, &container);
2271: PetscMalloc1(1, &pNf);
2272: *pNf = ctx->batch_sz;
2273: PetscContainerSetPointer(container, (void *)pNf);
2274: PetscContainerSetUserDestroy(container, MatrixNfDestroy);
2275: PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject) container);
2276: PetscContainerDestroy(&container);
2277: }
2279: return 0;
2280: }
2282: /*@
2283: DMPlexLandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh
2285: Collective on dm
2287: Input/Output Parameters:
2288: . dm - the dm to destroy
2290: Level: beginner
2292: .keywords: mesh
2293: .seealso: DMPlexLandauCreateVelocitySpace()
2294: @*/
2295: PetscErrorCode DMPlexLandauDestroyVelocitySpace(DM *dm)
2296: {
2297: LandauCtx *ctx;
2298: DMGetApplicationContext(*dm, &ctx);
2299: MatDestroy(&ctx->M);
2300: MatDestroy(&ctx->J);
2301: for (PetscInt ii=0;ii<ctx->num_species;ii++) PetscFEDestroy(&ctx->fe[ii]);
2302: ISDestroy(&ctx->batch_is);
2303: VecDestroy(&ctx->work_vec);
2304: VecScatterDestroy(&ctx->plex_batch);
2305: if (ctx->deviceType == LANDAU_CUDA) {
2306: #if defined(PETSC_HAVE_CUDA)
2307: LandauCUDAStaticDataClear(&ctx->SData_d);
2308: #else
2309: SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
2310: #endif
2311: } else if (ctx->deviceType == LANDAU_KOKKOS) {
2312: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2313: LandauKokkosStaticDataClear(&ctx->SData_d);
2314: #else
2315: SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
2316: #endif
2317: } else {
2318: if (ctx->SData_d.x) { /* in a CPU run */
2319: PetscReal *invJ = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w;
2320: LandauIdx *coo_elem_offsets = (LandauIdx*)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx*)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = (LandauIdx (*)[LANDAU_MAX_NQ+1])ctx->SData_d.coo_elem_point_offsets;
2321: PetscFree4(ww,xx,yy,invJ);
2322: if (zz) {
2323: PetscFree(zz);
2324: }
2325: if (coo_elem_offsets) {
2326: PetscFree3(coo_elem_offsets,coo_elem_fullNb,coo_elem_point_offsets); // could be NULL
2327: }
2328: }
2329: }
2331: if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings
2332: PetscPrintf(ctx->comm, "TSStep N 1.0 %10.3e\n",ctx->times[LANDAU_EX2_TSSOLVE]);
2333: PetscPrintf(ctx->comm, "2: Solve: %10.3e with %" PetscInt_FMT " threads\n",ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL],ctx->batch_sz);
2334: PetscPrintf(ctx->comm, "3: Landau: %10.3e\n",ctx->times[LANDAU_MATRIX_TOTAL]);
2335: PetscPrintf(ctx->comm, "Landau Jacobian %" PetscInt_FMT " 1.0 %10.3e\n",(PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT],ctx->times[LANDAU_JACOBIAN]);
2336: PetscPrintf(ctx->comm, "Landau Operator N 1.0 %10.3e\n",ctx->times[LANDAU_OPERATOR]);
2337: PetscPrintf(ctx->comm, "Landau Mass N 1.0 %10.3e\n",ctx->times[LANDAU_MASS]);
2338: PetscPrintf(ctx->comm, " Jac-f-df (GPU) N 1.0 %10.3e\n",ctx->times[LANDAU_F_DF]);
2339: PetscPrintf(ctx->comm, " Kernel (GPU) N 1.0 %10.3e\n",ctx->times[LANDAU_KERNEL]);
2340: PetscPrintf(ctx->comm, "MatLUFactorNum X 1.0 %10.3e\n",ctx->times[KSP_FACTOR]);
2341: PetscPrintf(ctx->comm, "MatSolve X 1.0 %10.3e\n",ctx->times[KSP_SOLVE]);
2342: }
2343: for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2344: DMDestroy(&ctx->plex[grid]);
2345: }
2346: PetscFree(ctx);
2347: DMDestroy(dm);
2348: return 0;
2349: }
2351: /* < v, ru > */
2352: static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2353: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2354: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2355: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2356: {
2357: PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2358: f0[0] = u[ii];
2359: }
2361: /* < v, ru > */
2362: static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2363: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2364: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2365: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2366: {
2367: PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]);
2368: f0[0] = x[jj]*u[ii]; /* x momentum */
2369: }
2371: static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2372: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2373: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2374: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2375: {
2376: PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]);
2377: double tmp1 = 0.;
2378: for (i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
2379: f0[0] = tmp1*u[ii];
2380: }
2382: static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx)
2383: {
2384: const PetscReal *c2_0_arr = ((PetscReal*)actx);
2385: const PetscReal c02 = c2_0_arr[0];
2387: for (int s = 0 ; s < Nf ; s++) {
2388: PetscReal tmp1 = 0.;
2389: for (int i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
2390: #if defined(PETSC_USE_DEBUG)
2391: u[s] = PetscSqrtReal(1. + tmp1/c02);// u[0] = PetscSqrtReal(1. + xx);
2392: #else
2393: {
2394: PetscReal xx = tmp1/c02;
2395: u[s] = xx/(PetscSqrtReal(1. + xx) + 1.); // better conditioned = xx/(PetscSqrtReal(1. + xx) + 1.)
2396: }
2397: #endif
2398: }
2399: return 0;
2400: }
2402: /* < v, ru > */
2403: static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2404: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2405: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2406: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2407: {
2408: PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2409: f0[0] = 2.*PETSC_PI*x[0]*u[ii];
2410: }
2412: /* < v, ru > */
2413: static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2414: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2415: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2416: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2417: {
2418: PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2419: f0[0] = 2.*PETSC_PI*x[0]*x[1]*u[ii];
2420: }
2422: static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2423: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2424: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2425: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2426: {
2427: PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2428: f0[0] = 2.*PETSC_PI*x[0]*(x[0]*x[0] + x[1]*x[1])*u[ii];
2429: }
2431: /*@
2432: DMPlexLandauPrintNorms - collects moments and prints them
2434: Collective on dm
2436: Input Parameters:
2437: + X - the state
2438: - stepi - current step to print
2440: Level: beginner
2442: .keywords: mesh
2443: .seealso: DMPlexLandauCreateVelocitySpace()
2444: @*/
2445: PetscErrorCode DMPlexLandauPrintNorms(Vec X, PetscInt stepi)
2446: {
2447: LandauCtx *ctx;
2448: PetscDS prob;
2449: DM pack;
2450: PetscInt cStart, cEnd, dim, ii, i0, nDMs;
2451: PetscScalar xmomentumtot=0, ymomentumtot=0, zmomentumtot=0, energytot=0, densitytot=0, tt[LANDAU_MAX_SPECIES];
2452: PetscScalar xmomentum[LANDAU_MAX_SPECIES], ymomentum[LANDAU_MAX_SPECIES], zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES];
2453: Vec *globXArray;
2455: VecGetDM(X, &pack);
2457: DMGetDimension(pack, &dim);
2459: DMGetApplicationContext(pack, &ctx);
2461: /* print momentum and energy */
2462: DMCompositeGetNumberDM(pack,&nDMs);
2464: PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray);
2465: DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray);
2466: for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
2467: Vec Xloc = globXArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ];
2468: DMGetDS(ctx->plex[grid], &prob);
2469: for (ii=ctx->species_offset[grid],i0=0;ii<ctx->species_offset[grid+1];ii++,i0++) {
2470: PetscScalar user[2] = { (PetscScalar)i0, (PetscScalar)ctx->charges[ii]};
2471: PetscDSSetConstants(prob, 2, user);
2472: if (dim==2) { /* 2/3X + 3V (cylindrical coordinates) */
2473: PetscDSSetObjective(prob, 0, &f0_s_rden);
2474: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
2475: density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
2476: PetscDSSetObjective(prob, 0, &f0_s_rmom);
2477: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
2478: zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2479: PetscDSSetObjective(prob, 0, &f0_s_rv2);
2480: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
2481: energy[ii] = tt[0]*0.5*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
2482: zmomentumtot += zmomentum[ii];
2483: energytot += energy[ii];
2484: densitytot += density[ii];
2485: PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species-%" PetscInt_FMT ": charge density= %20.13e z-momentum= %20.13e energy= %20.13e",stepi,ii,PetscRealPart(density[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii]));
2486: } else { /* 2/3X + 3V */
2487: PetscDSSetObjective(prob, 0, &f0_s_den);
2488: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
2489: density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
2490: PetscDSSetObjective(prob, 0, &f0_s_mom);
2491: user[1] = 0;
2492: PetscDSSetConstants(prob, 2, user);
2493: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
2494: xmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2495: user[1] = 1;
2496: PetscDSSetConstants(prob, 2, user);
2497: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
2498: ymomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2499: user[1] = 2;
2500: PetscDSSetConstants(prob, 2, user);
2501: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
2502: zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2503: if (ctx->use_relativistic_corrections) {
2504: /* gamma * M * f */
2505: if (ii==0 && grid==0) { // do all at once
2506: Vec Mf, globGamma, *globMfArray, *globGammaArray;
2507: PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {gamma_n_f};
2508: PetscReal *c2_0[1], data[1];
2510: VecDuplicate(X,&globGamma);
2511: VecDuplicate(X,&Mf);
2512: PetscMalloc(sizeof(*globMfArray)*nDMs, &globMfArray);
2513: PetscMalloc(sizeof(*globMfArray)*nDMs, &globGammaArray);
2514: /* M * f */
2515: MatMult(ctx->M,X,Mf);
2516: /* gamma */
2517: DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);
2518: for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // nested grid loop (inside the outer grid loop) so the output prints in order; needs fixing for batching
2519: Vec v1 = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ];
2520: data[0] = PetscSqr(C_0(ctx->v_0));
2521: c2_0[0] = &data[0];
2522: DMProjectFunction(ctx->plex[grid], 0., gammaf, (void**)c2_0, INSERT_ALL_VALUES, v1);
2523: }
2524: DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);
2525: /* gamma * Mf */
2526: DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);
2527: DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray);
2528: for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // nested grid loop (inside the outer grid loop) so the output prints in order
2529: PetscInt Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], N, bs;
2530: Vec Mfsub = globMfArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], Gsub = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], v1, v2;
2531: // get each component
2532: VecGetSize(Mfsub,&N);
2533: VecCreate(ctx->comm,&v1);
2534: VecSetSizes(v1,PETSC_DECIDE,N/Nf);
2535: VecCreate(ctx->comm,&v2);
2536: VecSetSizes(v2,PETSC_DECIDE,N/Nf);
2537: VecSetFromOptions(v1); // set the vector type from the options database
2538: VecSetFromOptions(v2);
2539: // check that the block size of each sub-vector equals the number of species on this grid
2540: VecGetBlockSize(Gsub,&bs);
2541: if (bs != Nf) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num species %" PetscInt_FMT " in Gsub", bs, Nf);
2542: VecGetBlockSize(Mfsub,&bs);
2543: if (bs != Nf) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num species %" PetscInt_FMT " in Mfsub", bs, Nf);
2544: for (int i=0, ix=ctx->species_offset[grid] ; i<Nf ; i++, ix++) {
2545: PetscScalar val;
2546: VecStrideGather(Gsub,i,v1,INSERT_VALUES);
2547: VecStrideGather(Mfsub,i,v2,INSERT_VALUES);
2548: VecDot(v1,v2,&val);
2549: energy[ix] = PetscRealPart(val)*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ix];
2550: }
2551: VecDestroy(&v1);
2552: VecDestroy(&v2);
2553: } /* grids */
2554: DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);
2555: DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray);
2556: PetscFree(globGammaArray);
2557: PetscFree(globMfArray);
2558: VecDestroy(&globGamma);
2559: VecDestroy(&Mf);
2560: }
2561: } else {
2562: PetscDSSetObjective(prob, 0, &f0_s_v2);
2563: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
2564: energy[ii] = 0.5*tt[0]*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
2565: }
2566: PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species %" PetscInt_FMT ": charge density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e",stepi,ii,(double)PetscRealPart(density[ii]),(double)PetscRealPart(xmomentum[ii]),(double)PetscRealPart(ymomentum[ii]),(double)PetscRealPart(zmomentum[ii]),(double)PetscRealPart(energy[ii]));
2567: xmomentumtot += xmomentum[ii];
2568: ymomentumtot += ymomentum[ii];
2569: zmomentumtot += zmomentum[ii];
2570: energytot += energy[ii];
2571: densitytot += density[ii];
2572: }
2573: if (ctx->num_species>1) PetscPrintf(ctx->comm, "\n");
2574: }
2575: }
2576: DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray);
2577: PetscFree(globXArray);
2578: /* totals */
2579: DMPlexGetHeightStratum(ctx->plex[0],0,&cStart,&cEnd);
2580: if (ctx->num_species>1) {
2581: if (dim==2) {
2582: PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells on electron grid)",stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart);
2583: } else {
2584: PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells)",stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(xmomentumtot),(double)PetscRealPart(ymomentumtot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart);
2585: }
2586: } else PetscPrintf(ctx->comm, " -- %" PetscInt_FMT " cells",cEnd-cStart);
2587: PetscPrintf(ctx->comm,"\n");
2588: return 0;
2589: }
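/* A minimal usage sketch for DMPlexLandauPrintNorms() (illustrative only; the monitor name below is not part
   of this file): it is typically called from a TS monitor after each accepted step.

     static PetscErrorCode MyLandauMonitor(TS ts, PetscInt stepi, PetscReal time, Vec X, void *mctx)
     {
       DMPlexLandauPrintNorms(X, stepi); // X must carry the pack DM from DMPlexLandauCreateVelocitySpace()
       return 0;
     }

     TSMonitorSet(ts, MyLandauMonitor, NULL, NULL);
*/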
2591: /*@
2592: DMPlexLandauCreateMassMatrix - Create the mass matrix for the Landau operator in PLEX space (as opposed to the field-major ordering used for the Jacobian)
2594: Collective on pack
2596: Input Parameters:
2597: . pack - the DM object
2599: Output Parameter:
2600: . Amat - The mass matrix (optional); the mass matrix is also stored in the Landau context attached to the DM
2602: Level: beginner
2604: .keywords: mesh
2605: .seealso: DMPlexLandauCreateVelocitySpace()
2606: @*/
2607: PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat)
2608: {
2609: DM mass_pack,massDM[LANDAU_MAX_GRIDS];
2610: PetscDS prob;
2611: PetscInt ii,dim,N1=1,N2;
2612: LandauCtx *ctx;
2613: Mat packM,subM[LANDAU_MAX_GRIDS];
2617: DMGetApplicationContext(pack, &ctx);
2619: PetscLogEventBegin(ctx->events[14],0,0,0,0);
2620: DMGetDimension(pack, &dim);
2621: DMCompositeCreate(PetscObjectComm((PetscObject) pack),&mass_pack);
2622: /* create pack mass matrix */
2623: for (PetscInt grid=0, ix=0 ; grid<ctx->num_grids ; grid++) {
2624: DMClone(ctx->plex[grid], &massDM[grid]);
2625: DMCopyFields(ctx->plex[grid], massDM[grid]);
2626: DMCreateDS(massDM[grid]);
2627: DMGetDS(massDM[grid], &prob);
2628: for (ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) {
2629: if (dim==3) PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL);
2630: else PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL);
2631: }
2632: #if !defined(LANDAU_SPECIES_MAJOR)
2633: DMCompositeAddDM(mass_pack,massDM[grid]);
2634: #else
2635: for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid
2636: DMCompositeAddDM(mass_pack,massDM[grid]);
2637: }
2638: #endif
2639: DMCreateMatrix(massDM[grid], &subM[grid]);
2640: }
2641: #if !defined(LANDAU_SPECIES_MAJOR)
2642: // stack the batched DMs
2643: for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) {
2644: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2645: DMCompositeAddDM(mass_pack, massDM[grid]);
2646: }
2647: }
2648: #endif
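/* mass_pack now holds batch_sz copies of each grid's mass DM in the same (batch, grid) ordering as the
   Jacobian, so the matrix created from it below has the same global size and block layout as ctx->J. */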
2649: PetscOptionsInsertString(NULL,"-dm_preallocate_only");
2650: DMSetFromOptions(mass_pack);
2651: DMCreateMatrix(mass_pack, &packM);
2652: PetscOptionsInsertString(NULL,"-dm_preallocate_only false");
2653: MatSetOption(packM,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
2654: MatSetOption(packM,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE);
2655: DMDestroy(&mass_pack);
2656: /* make mass matrix for each block */
2657: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2658: Vec locX;
2659: DM plex = massDM[grid];
2660: DMGetLocalVector(plex, &locX);
2661: /* Mass matrix is independent of the input, so no need to fill locX */
2662: DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx);
2663: DMRestoreLocalVector(plex, &locX);
2664: DMDestroy(&massDM[grid]);
2665: }
2666: MatGetSize(ctx->J, &N1, NULL);
2667: MatGetSize(packM, &N2, NULL);
2668: if (N1 != N2) SETERRQ(PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %" PetscInt_FMT " != |Mass| = %" PetscInt_FMT, N1, N2);
2669: /* assemble the block diagonal: copy each per-grid mass matrix into packM once per batch, at the (batch,grid) offset */
2670: for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2671: Mat B = subM[grid];
2672: PetscInt nloc, nzl, colbuf[1024], row;
2673: MatGetSize(B, &nloc, NULL);
2674: for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2675: const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2676: const PetscInt *cols;
2677: const PetscScalar *vals;
2678: for (int i=0 ; i<nloc ; i++) {
2679: MatGetRow(B,i,&nzl,&cols,&vals);
2680: if (nzl > 1024) SETERRQ(PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT " > 1024 (colbuf size)", nzl);
2681: for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset;
2682: row = i + moffset;
2683: MatSetValues(packM,1,&row,nzl,colbuf,vals,INSERT_VALUES);
2684: MatRestoreRow(B,i,&nzl,&cols,&vals);
2685: }
2686: }
2687: }
2688: // cleanup
2689: for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2690: MatDestroy(&subM[grid]);
2691: }
2692: MatAssemblyBegin(packM,MAT_FINAL_ASSEMBLY);
2693: MatAssemblyEnd(packM,MAT_FINAL_ASSEMBLY);
2694: PetscObjectSetName((PetscObject)packM, "mass");
2695: MatViewFromOptions(packM,NULL,"-dm_landau_mass_view");
2696: ctx->M = packM;
2697: if (Amat) *Amat = packM;
2698: PetscLogEventEnd(ctx->events[14],0,0,0,0);
2699: return 0;
2700: }
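/* A minimal usage sketch for DMPlexLandauCreateMassMatrix() (illustrative only; "pack" is assumed to be the
   DM from DMPlexLandauCreateVelocitySpace()):

     Mat M;
     DMPlexLandauCreateMassMatrix(pack, &M); // M is also stored in the Landau context for use in IFunction/IJacobian
     // Passing NULL for Amat is also valid; the matrix is kept in the context either way.
*/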
2702: /*@
2703: DMPlexLandauIFunction - TS residual calculation
2705: Collective on ts
2707: Input Parameters:
2708: + ts - The time stepping context
2709: . time_dummy - current time (not used)
2710: . X - Current state
2711: . X_t - Time derivative of current state
2712: - actx - Landau context
2714: Output Parameter:
2715: . F - The residual
2717: Level: beginner
2719: .keywords: mesh
2720: .seealso: DMPlexLandauCreateVelocitySpace(), DMPlexLandauIJacobian()
2721: @*/
2722: PetscErrorCode DMPlexLandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx)
2723: {
2724: LandauCtx *ctx=(LandauCtx*)actx;
2725: PetscInt dim;
2726: DM pack;
2727: #if defined(PETSC_HAVE_THREADSAFETY)
2728: double starttime, endtime;
2729: #endif
2731: TSGetDM(ts,&pack);
2732: DMGetApplicationContext(pack, &ctx);
2734: if (ctx->stage) {
2735: PetscLogStagePush(ctx->stage);
2736: }
2737: PetscLogEventBegin(ctx->events[11],0,0,0,0);
2738: PetscLogEventBegin(ctx->events[0],0,0,0,0);
2739: #if defined(PETSC_HAVE_THREADSAFETY)
2740: starttime = MPI_Wtime();
2741: #endif
2742: DMGetDimension(pack, &dim);
2743: if (!ctx->aux_bool) {
2744: PetscInfo(ts, "Create Landau Jacobian t=%g X_t=%p\n",(double)time_dummy,(void*)X_t);
2745: LandauFormJacobian_Internal(X,ctx->J,dim,0.0,(void*)ctx);
2746: MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view");
2747: ctx->aux_bool = PETSC_TRUE;
2748: } else {
2749: PetscInfo(ts, "Skip forming Jacobian, it has not changed (possibly in a line search; should check the norm)\n");
2750: }
2751: /* apply the collision operator: F = C*f, with the assembled operator held in ctx->J */
2752: MatMult(ctx->J,X,F); /* C*f */
2753: /* add the time term: F += M * X_t */
2754: if (X_t) {
2755: MatMultAdd(ctx->M,X_t,F,F);
2756: }
2757: #if defined(PETSC_HAVE_THREADSAFETY)
2758: if (ctx->stage) {
2759: endtime = MPI_Wtime();
2760: ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2761: ctx->times[LANDAU_JACOBIAN] += (endtime - starttime);
2762: ctx->times[LANDAU_JACOBIAN_COUNT] += 1;
2763: }
2764: #endif
2765: PetscLogEventEnd(ctx->events[0],0,0,0,0);
2766: PetscLogEventEnd(ctx->events[11],0,0,0,0);
2767: if (ctx->stage) {
2768: PetscLogStagePop();
2769: #if defined(PETSC_HAVE_THREADSAFETY)
2770: ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2771: #endif
2772: }
2773: return 0;
2774: }
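/* A minimal usage sketch for DMPlexLandauIFunction() (illustrative only; "ts" is the application's TS built on
   the pack DM):

     TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL); // the Landau context is recovered from the DM
*/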
2776: /*@
2777: DMPlexLandauIJacobian - TS Jacobian construction
2779: Collective on ts
2781: Input Parameters:
2782: + ts - The time stepping context
2783: . time_dummy - current time (not used)
2784: . X - Current state
2785: . U_tdummy - Time derivative of current state (not used)
2786: . shift - shift for du/dt term
2787: - actx - Landau context
2789: Output Parameters:
2790: + Amat - Jacobian
2791: - Pmat - same as Amat
2793: Level: beginner
2795: .keywords: mesh
2796: .seealso: DMPlexLandauCreateVelocitySpace(), DMPlexLandauIFunction()
2797: @*/
2798: PetscErrorCode DMPlexLandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx)
2799: {
2800: LandauCtx *ctx=NULL;
2801: PetscInt dim;
2802: DM pack;
2803: #if defined(PETSC_HAVE_THREADSAFETY)
2804: double starttime, endtime;
2805: #endif
2806: TSGetDM(ts,&pack);
2807: DMGetApplicationContext(pack, &ctx);
2810: DMGetDimension(pack, &dim);
2811: /* get collision Jacobian into A */
2812: if (ctx->stage) {
2813: PetscLogStagePush(ctx->stage);
2814: }
2815: PetscLogEventBegin(ctx->events[11],0,0,0,0);
2816: PetscLogEventBegin(ctx->events[9],0,0,0,0);
2817: #if defined(PETSC_HAVE_THREADSAFETY)
2818: starttime = MPI_Wtime();
2819: #endif
2820: PetscInfo(ts, "Form Landau IJacobian t=%g, shift=%g (%s)\n",(double)time_dummy,(double)shift,ctx->use_matrix_mass ? "add shift*M to the stored Jacobian" : "reassemble the Jacobian with the shift");
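/* The TS I-Jacobian is dF/dX + shift * dF/dX_t = C(X) + shift * M: either the operator is reassembled with
   the shift folded in (default), or, when a matrix mass is used, shift*M is added to the stored Jacobian. */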
2823: if (!ctx->use_matrix_mass) {
2824: LandauFormJacobian_Internal(X,ctx->J,dim,shift,(void*)ctx);
2825: MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view");
2826: } else { /* add mass */
2827: MatAXPY(Pmat,shift,ctx->M,SAME_NONZERO_PATTERN);
2828: }
2829: ctx->aux_bool = PETSC_FALSE;
2830: #if defined(PETSC_HAVE_THREADSAFETY)
2831: if (ctx->stage) {
2832: endtime = MPI_Wtime();
2833: ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2834: ctx->times[LANDAU_MASS] += (endtime - starttime);
2835: }
2836: #endif
2837: PetscLogEventEnd(ctx->events[9],0,0,0,0);
2838: PetscLogEventEnd(ctx->events[11],0,0,0,0);
2839: if (ctx->stage) {
2840: PetscLogStagePop();
2841: #if defined(PETSC_HAVE_THREADSAFETY)
2842: ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2843: #endif
2844: }
2845: return 0;
2846: }
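/* A minimal usage sketch for DMPlexLandauIJacobian() (illustrative only; J is assumed to be the Landau
   Jacobian matrix, e.g. as returned by DMPlexLandauCreateVelocitySpace()):

     TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL); // the Landau context is recovered from the DM
*/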