Actual source code: plexdistribute.c

petsc-3.11.0 2019-03-29
  1:  #include <petsc/private/dmpleximpl.h>
  2:  #include <petsc/private/dmlabelimpl.h>

  4: /*@C
  5:   DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback

  7:   Input Parameters:
  8: + dm      - The DM object
  9: . user    - The user callback, may be NULL (to clear the callback)
 10: - ctx     - context for callback evaluation, may be NULL

 12:   Level: advanced

 14:   Notes:
 15:      The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.

 17:      Any setting here overrides other configuration of DMPlex adjacency determination.

 19: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser()
 20: @*/
 21: PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
 22: {
 23:   DM_Plex *mesh = (DM_Plex *)dm->data;

 27:   mesh->useradjacency = user;
 28:   mesh->useradjacencyctx = ctx;
 29:   return(0);
 30: }
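
/* Example usage (a minimal sketch, not from the PETSc sources; the callback name
   MyAdjacency is illustrative). The callback fills adj[] with the points adjacent
   to p and returns the count in *adjSize, just as the built-in adjacency routines
   below do; here the trivial rule is that each point is adjacent only to itself.
   Passing NULL for the callback clears it again.

     static PetscErrorCode MyAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
     {
       adj[0]   = p;
       *adjSize = 1;
       return 0;
     }

     DMPlexSetAdjacencyUser(dm, MyAdjacency, NULL);
*/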

 32: /*@C
 33:   DMPlexGetAdjacencyUser - get the user-defined adjacency callback

 35:   Input Parameter:
 36: . dm      - The DM object

 38:   Output Parameters:
 39: + user    - The user callback
 40: - ctx     - context for callback evaluation

 42:   Level: advanced

 44: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser()
 45: @*/
 46: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
 47: {
 48:   DM_Plex *mesh = (DM_Plex *)dm->data;

 52:   if (user) *user = mesh->useradjacency;
 53:   if (ctx) *ctx = mesh->useradjacencyctx;
 54:   return(0);
 55: }

 57: /*@
 58:   DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.

 60:   Input Parameters:
 61: + dm      - The DM object
 62: - useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

 64:   Level: intermediate

 66: .seealso: DMGetAdjacency(), DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
 67: @*/
 68: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
 69: {
 70:   DM_Plex *mesh = (DM_Plex *) dm->data;

 74:   mesh->useAnchors = useAnchors;
 75:   return(0);
 76: }

 78: /*@
 79:   DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.

 81:   Input Parameter:
 82: . dm      - The DM object

 84:   Output Parameter:
 85: . useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

 87:   Level: intermediate

 89: .seealso: DMPlexSetAdjacencyUseAnchors(), DMSetAdjacency(), DMGetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
 90: @*/
 91: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
 92: {
 93:   DM_Plex *mesh = (DM_Plex *) dm->data;

 98:   *useAnchors = mesh->useAnchors;
 99:   return(0);
100: }
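
/* Example usage (a minimal sketch, not from the PETSc sources). With anchors
   installed via DMPlexSetAnchors(), enabling this flag makes DMPlexGetAdjacency()
   report anchor points in place of constrained points:

     PetscBool useAnchors;

     DMPlexSetAdjacencyUseAnchors(dm, PETSC_TRUE);
     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);

   after which useAnchors is PETSC_TRUE. */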

102: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
103: {
104:   const PetscInt *cone = NULL;
105:   PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
106:   PetscErrorCode  ierr;

109:   DMPlexGetConeSize(dm, p, &coneSize);
110:   DMPlexGetCone(dm, p, &cone);
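      /* The loop below visits p itself (c == 0) and then each point of its cone; the
         inner scan over adj[0..numAdj) uses a short-circuit idiom: when no match is
         found, the second operand of || appends the candidate (the comma expression
         yields 0, which terminates the scan). The same append-if-absent idiom recurs
         in the other adjacency routines below. */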
111:   for (c = 0; c <= coneSize; ++c) {
112:     const PetscInt  point   = !c ? p : cone[c-1];
113:     const PetscInt *support = NULL;
114:     PetscInt        supportSize, s, q;

116:     DMPlexGetSupportSize(dm, point, &supportSize);
117:     DMPlexGetSupport(dm, point, &support);
118:     for (s = 0; s < supportSize; ++s) {
119:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
120:         if (support[s] == adj[q]) break;
121:       }
122:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
123:     }
124:   }
125:   *adjSize = numAdj;
126:   return(0);
127: }

129: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
130: {
131:   const PetscInt *support = NULL;
132:   PetscInt        numAdj   = 0, maxAdjSize = *adjSize, supportSize, s;
133:   PetscErrorCode  ierr;

136:   DMPlexGetSupportSize(dm, p, &supportSize);
137:   DMPlexGetSupport(dm, p, &support);
138:   for (s = 0; s <= supportSize; ++s) {
139:     const PetscInt  point = !s ? p : support[s-1];
140:     const PetscInt *cone  = NULL;
141:     PetscInt        coneSize, c, q;

143:     DMPlexGetConeSize(dm, point, &coneSize);
144:     DMPlexGetCone(dm, point, &cone);
145:     for (c = 0; c < coneSize; ++c) {
146:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
147:         if (cone[c] == adj[q]) break;
148:       }
149:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
150:     }
151:   }
152:   *adjSize = numAdj;
153:   return(0);
154: }

156: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
157: {
158:   PetscInt      *star = NULL;
159:   PetscInt       numAdj = 0, maxAdjSize = *adjSize, starSize, s;

163:   DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
164:   for (s = 0; s < starSize*2; s += 2) {
165:     const PetscInt *closure = NULL;
166:     PetscInt        closureSize, c, q;

168:     DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
169:     for (c = 0; c < closureSize*2; c += 2) {
170:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
171:         if (closure[c] == adj[q]) break;
172:       }
173:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
174:     }
175:     DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
176:   }
177:   DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
178:   *adjSize = numAdj;
179:   return(0);
180: }

182: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
183: {
184:   static PetscInt asiz = 0;
185:   PetscInt maxAnchors = 1;
186:   PetscInt aStart = -1, aEnd = -1;
187:   PetscInt maxAdjSize;
188:   PetscSection aSec = NULL;
189:   IS aIS = NULL;
190:   const PetscInt *anchors;
191:   DM_Plex *mesh = (DM_Plex *)dm->data;
192:   PetscErrorCode  ierr;

195:   if (useAnchors) {
196:     DMPlexGetAnchors(dm,&aSec,&aIS);
197:     if (aSec) {
198:       PetscSectionGetMaxDof(aSec,&maxAnchors);
199:       maxAnchors = PetscMax(1,maxAnchors);
200:       PetscSectionGetChart(aSec,&aStart,&aEnd);
201:       ISGetIndices(aIS,&anchors);
202:     }
203:   }
204:   if (!*adj) {
205:     PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

207:     DMPlexGetChart(dm, &pStart,&pEnd);
208:     DMPlexGetDepth(dm, &depth);
209:     DMPlexGetMaxSizes(dm, &maxC, &maxS);
210:     coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
211:     supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
212:     asiz  = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
213:     asiz *= maxAnchors;
214:     asiz  = PetscMin(asiz,pEnd-pStart);
215:     PetscMalloc1(asiz,adj);
216:   }
217:   if (*adjSize < 0) *adjSize = asiz;
218:   maxAdjSize = *adjSize;
219:   if (mesh->useradjacency) {
220:     mesh->useradjacency(dm, p, adjSize, *adj, mesh->useradjacencyctx);
221:   } else if (useTransitiveClosure) {
222:     DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
223:   } else if (useCone) {
224:     DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
225:   } else {
226:     DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
227:   }
228:   if (useAnchors && aSec) {
229:     PetscInt origSize = *adjSize;
230:     PetscInt numAdj = origSize;
231:     PetscInt i = 0, j;
232:     PetscInt *orig = *adj;

234:     while (i < origSize) {
235:       PetscInt p = orig[i];
236:       PetscInt aDof = 0;

238:       if (p >= aStart && p < aEnd) {
239:         PetscSectionGetDof(aSec,p,&aDof);
240:       }
241:       if (aDof) {
242:         PetscInt aOff;
243:         PetscInt s, q;

245:         for (j = i + 1; j < numAdj; j++) {
246:           orig[j - 1] = orig[j];
247:         }
248:         origSize--;
249:         numAdj--;
250:         PetscSectionGetOffset(aSec,p,&aOff);
251:         for (s = 0; s < aDof; ++s) {
252:           for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
253:             if (anchors[aOff+s] == orig[q]) break;
254:           }
255:           if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
256:         }
257:       }
258:       else {
259:         i++;
260:       }
261:     }
262:     *adjSize = numAdj;
263:     ISRestoreIndices(aIS,&anchors);
264:   }
265:   return(0);
266: }

268: /*@
269:   DMPlexGetAdjacency - Return all points adjacent to the given point

271:   Input Parameters:
272: + dm - The DM object
273: . p  - The point
274: . adjSize - The maximum size of adj if adj is non-NULL, or PETSC_DETERMINE
275: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize

277:   Output Parameters:
278: + adjSize - The number of adjacent points
279: - adj - The adjacent points

281:   Level: advanced

283:   Notes:
284:     The user must PetscFree the adj array if it was not passed in.

286: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
287: @*/
288: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
289: {
290:   PetscBool      useCone, useClosure, useAnchors;

297:   DMGetBasicAdjacency(dm, &useCone, &useClosure);
298:   DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
299:   DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj);
300:   return(0);
301: }
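
/* Example usage (a minimal sketch, not from the PETSc sources). Passing adj = NULL
   makes the routine allocate the array, which the caller must then PetscFree(),
   per the Notes above:

     PetscInt *adj = NULL, adjSize = PETSC_DETERMINE, a;

     DMPlexGetAdjacency(dm, p, &adjSize, &adj);
     for (a = 0; a < adjSize; ++a) PetscPrintf(PETSC_COMM_SELF, "adjacent point %D\n", adj[a]);
     PetscFree(adj);
*/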

303: /*@
304:   DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

306:   Collective on DM

308:   Input Parameters:
309: + dm              - The DM
310: . sfPoint         - The PetscSF which encodes point connectivity
     . rootRankSection - The number of leaves for a given root point
     . rootRanks       - The rank of each edge into the root point
     . leafRankSection - The number of processes sharing a given leaf point
     - leafRanks       - The rank of each process sharing a leaf point

312:   Output Parameters:
313: + processRanks - A list of process neighbors, or NULL
314: - sfProcess    - An SF encoding the two-sided process connectivity, or NULL

316:   Level: developer

318: .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
319: @*/
320: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
321: {
322:   const PetscSFNode *remotePoints;
323:   PetscInt          *localPointsNew;
324:   PetscSFNode       *remotePointsNew;
325:   const PetscInt    *nranks;
326:   PetscInt          *ranksNew;
327:   PetscBT            neighbors;
328:   PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
329:   PetscMPIInt        size, proc, rank;
330:   PetscErrorCode     ierr;

337:   MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
338:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
339:   PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
340:   PetscBTCreate(size, &neighbors);
341:   PetscBTMemzero(size, neighbors);
342:   /* Compute root-to-leaf process connectivity */
343:   PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
344:   ISGetIndices(rootRanks, &nranks);
345:   for (p = pStart; p < pEnd; ++p) {
346:     PetscInt ndof, noff, n;

348:     PetscSectionGetDof(rootRankSection, p, &ndof);
349:     PetscSectionGetOffset(rootRankSection, p, &noff);
350:     for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
351:   }
352:   ISRestoreIndices(rootRanks, &nranks);
353:   /* Compute leaf-to-neighbor process connectivity */
354:   PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
355:   ISGetIndices(leafRanks, &nranks);
356:   for (p = pStart; p < pEnd; ++p) {
357:     PetscInt ndof, noff, n;

359:     PetscSectionGetDof(leafRankSection, p, &ndof);
360:     PetscSectionGetOffset(leafRankSection, p, &noff);
361:     for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
362:   }
363:   ISRestoreIndices(leafRanks, &nranks);
364:   /* Compute leaf-to-root process connectivity */
365:   for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
366:   /* Calculate edges */
367:   PetscBTClear(neighbors, rank);
368:   for (proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
369:   PetscMalloc1(numNeighbors, &ranksNew);
370:   PetscMalloc1(numNeighbors, &localPointsNew);
371:   PetscMalloc1(numNeighbors, &remotePointsNew);
372:   for (proc = 0, n = 0; proc < size; ++proc) {
373:     if (PetscBTLookup(neighbors, proc)) {
374:       ranksNew[n]              = proc;
375:       localPointsNew[n]        = proc;
376:       remotePointsNew[n].index = rank;
377:       remotePointsNew[n].rank  = proc;
378:       ++n;
379:     }
380:   }
381:   PetscBTDestroy(&neighbors);
382:   if (processRanks) {ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);}
383:   else              {PetscFree(ranksNew);}
384:   if (sfProcess) {
385:     PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
386:     PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");
387:     PetscSFSetFromOptions(*sfProcess);
388:     PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
389:   }
390:   return(0);
391: }

393: /*@
394:   DMPlexDistributeOwnership - Compute owner information for shared points. This effectively produces a two-sided description of ownership from the one-sided point SF.

396:   Collective on DM

398:   Input Parameter:
399: . dm - The DM

401:   Output Parameters:
402: + rootSection - The number of leaves for a given root point
403: . rootrank    - The rank of each edge into the root point
404: . leafSection - The number of processes sharing a given leaf point
405: - leafrank    - The rank of each process sharing a leaf point

407:   Level: developer

409: .seealso: DMPlexCreateOverlap()
410: @*/
411: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
412: {
413:   MPI_Comm        comm;
414:   PetscSF         sfPoint;
415:   const PetscInt *rootdegree;
416:   PetscInt       *myrank, *remoterank;
417:   PetscInt        pStart, pEnd, p, nedges;
418:   PetscMPIInt     rank;
419:   PetscErrorCode  ierr;

422:   PetscObjectGetComm((PetscObject) dm, &comm);
423:   MPI_Comm_rank(comm, &rank);
424:   DMPlexGetChart(dm, &pStart, &pEnd);
425:   DMGetPointSF(dm, &sfPoint);
426:   /* Compute number of leaves for each root */
427:   PetscObjectSetName((PetscObject) rootSection, "Root Section");
428:   PetscSectionSetChart(rootSection, pStart, pEnd);
429:   PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
430:   PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
431:   for (p = pStart; p < pEnd; ++p) {PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);}
432:   PetscSectionSetUp(rootSection);
433:   /* Gather rank of each leaf to root */
434:   PetscSectionGetStorageSize(rootSection, &nedges);
435:   PetscMalloc1(pEnd-pStart, &myrank);
436:   PetscMalloc1(nedges,  &remoterank);
437:   for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
438:   PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
439:   PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
440:   PetscFree(myrank);
441:   ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
442:   /* Distribute remote ranks to leaves */
443:   PetscObjectSetName((PetscObject) leafSection, "Leaf Section");
444:   DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
445:   return(0);
446: }

448: /*@C
449:   DMPlexCreateOverlap - Compute a label indicating the overlap points, i.e. the points each process must send to its neighbors, using the ownership information from DMPlexDistributeOwnership()

451:   Collective on DM

453:   Input Parameters:
454: + dm          - The DM
455: . levels      - Number of overlap levels
456: . rootSection - The number of leaves for a given root point
457: . rootrank    - The rank of each edge into the root point
458: . leafSection - The number of processes sharing a given leaf point
459: - leafrank    - The rank of each process sharing a leaf point

461:   Output Parameter:
462: . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings

464:   Level: developer

466: .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
467: @*/
468: PetscErrorCode DMPlexCreateOverlap(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
469: {
470:   MPI_Comm           comm;
471:   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
472:   PetscSF            sfPoint;
473:   const PetscSFNode *remote;
474:   const PetscInt    *local;
475:   const PetscInt    *nrank, *rrank;
476:   PetscInt          *adj = NULL;
477:   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
478:   PetscMPIInt        rank, size;
479:   PetscBool          useCone, useClosure, flg;
480:   PetscErrorCode     ierr;

483:   PetscObjectGetComm((PetscObject) dm, &comm);
484:   MPI_Comm_size(comm, &size);
485:   MPI_Comm_rank(comm, &rank);
486:   DMGetPointSF(dm, &sfPoint);
487:   DMPlexGetChart(dm, &pStart, &pEnd);
488:   PetscSectionGetChart(leafSection, &sStart, &sEnd);
489:   PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
490:   DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank);
491:   /* Handle leaves: shared with the root point */
492:   for (l = 0; l < nleaves; ++l) {
493:     PetscInt adjSize = PETSC_DETERMINE, a;

495:     DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj);
496:     for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);}
497:   }
498:   ISGetIndices(rootrank, &rrank);
499:   ISGetIndices(leafrank, &nrank);
500:   /* Handle roots */
501:   for (p = pStart; p < pEnd; ++p) {
502:     PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;

504:     if ((p >= sStart) && (p < sEnd)) {
505:       /* Some leaves share a root with other leaves on different processes */
506:       PetscSectionGetDof(leafSection, p, &neighbors);
507:       if (neighbors) {
508:         PetscSectionGetOffset(leafSection, p, &noff);
509:         DMPlexGetAdjacency(dm, p, &adjSize, &adj);
510:         for (n = 0; n < neighbors; ++n) {
511:           const PetscInt remoteRank = nrank[noff+n];

513:           if (remoteRank == rank) continue;
514:           for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
515:         }
516:       }
517:     }
518:     /* Roots are shared with leaves */
519:     PetscSectionGetDof(rootSection, p, &neighbors);
520:     if (!neighbors) continue;
521:     PetscSectionGetOffset(rootSection, p, &noff);
522:     DMPlexGetAdjacency(dm, p, &adjSize, &adj);
523:     for (n = 0; n < neighbors; ++n) {
524:       const PetscInt remoteRank = rrank[noff+n];

526:       if (remoteRank == rank) continue;
527:       for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
528:     }
529:   }
530:   PetscFree(adj);
531:   ISRestoreIndices(rootrank, &rrank);
532:   ISRestoreIndices(leafrank, &nrank);
533:   /* Add additional overlap levels */
534:   for (l = 1; l < levels; l++) {
535:     /* Propagate point donations over SF to capture remote connections */
536:     DMPlexPartitionLabelPropagate(dm, ovAdjByRank);
537:     /* Add next level of point donations to the label */
538:     DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);
539:   }
540:   /* We require the closure in the overlap */
541:   DMGetBasicAdjacency(dm, &useCone, &useClosure);
542:   if (useCone || !useClosure) {
543:     DMPlexPartitionLabelClosure(dm, ovAdjByRank);
544:   }
545:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);
546:   if (flg) {
547:     PetscViewer viewer;
548:     PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer);
549:     DMLabelView(ovAdjByRank, viewer);
550:   }
551:   /* Invert sender to receiver label */
552:   DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);
553:   DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel);
554:   /* Add owned points, except for shared local points */
555:   for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
556:   for (l = 0; l < nleaves; ++l) {
557:     DMLabelClearValue(*ovLabel, local[l], rank);
558:     DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
559:   }
560:   /* Clean up */
561:   DMLabelDestroy(&ovAdjByRank);
562:   return(0);
563: }
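
/* Example usage (a minimal sketch, not from the PETSc sources; it mirrors the way
   DMPlexDistributeOverlap() combines the two routines above). The ownership
   sections and ISs produced by DMPlexDistributeOwnership() feed directly into
   DMPlexCreateOverlap():

     PetscSection rootSection, leafSection;
     IS           rootrank, leafrank;
     DMLabel      ovLabel;

     PetscSectionCreate(comm, &rootSection);
     PetscSectionCreate(comm, &leafSection);
     DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
     DMPlexCreateOverlap(dm, 1, rootSection, rootrank, leafSection, leafrank, &ovLabel);
     ISDestroy(&rootrank);
     ISDestroy(&leafrank);
     PetscSectionDestroy(&rootSection);
     PetscSectionDestroy(&leafSection);

   ovLabel can then be turned into a migration SF; destroy it with DMLabelDestroy()
   when it is no longer needed. */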

565: /*@C
566:   DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution that realizes the overlap described by the input SF

568:   Collective on DM

570:   Input Parameters:
571: + dm          - The DM
572: - overlapSF   - The SF mapping ghost points in overlap to owner points on other processes

574:   Output Parameter:
575: . migrationSF - An SF that maps original points in old locations to points in new locations

577:   Level: developer

579: .seealso: DMPlexCreateOverlap(), DMPlexDistribute()
580: @*/
581: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
582: {
583:   MPI_Comm           comm;
584:   PetscMPIInt        rank, size;
585:   PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
586:   PetscInt          *pointDepths, *remoteDepths, *ilocal;
587:   PetscInt          *depthRecv, *depthShift, *depthIdx;
588:   PetscSFNode       *iremote;
589:   PetscSF            pointSF;
590:   const PetscInt    *sharedLocal;
591:   const PetscSFNode *overlapRemote, *sharedRemote;
592:   PetscErrorCode     ierr;

596:   PetscObjectGetComm((PetscObject)dm, &comm);
597:   MPI_Comm_rank(comm, &rank);
598:   MPI_Comm_size(comm, &size);
599:   DMGetDimension(dm, &dim);

601:   /* Before building the migration SF we need to know the new stratum offsets */
602:   PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
603:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
604:   for (d=0; d<dim+1; d++) {
605:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
606:     for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
607:   }
608:   for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
609:   PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);
610:   PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);

612:   /* Count received points in each stratum and compute the internal strata shift */
613:   PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);
614:   for (d=0; d<dim+1; d++) depthRecv[d]=0;
615:   for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
616:   depthShift[dim] = 0;
617:   for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
618:   for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
619:   for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
620:   for (d=0; d<dim+1; d++) {
621:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
622:     depthIdx[d] = pStart + depthShift[d];
623:   }

625:   /* Build an SF that describes the full overlap migration: each local point maps to itself, and the received overlap points are appended */
626:   DMPlexGetChart(dm, &pStart, &pEnd);
627:   newLeaves = pEnd - pStart + nleaves;
628:   PetscMalloc1(newLeaves, &ilocal);
629:   PetscMalloc1(newLeaves, &iremote);
630:   /* First map local points to themselves */
631:   for (d=0; d<dim+1; d++) {
632:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
633:     for (p=pStart; p<pEnd; p++) {
634:       point = p + depthShift[d];
635:       ilocal[point] = point;
636:       iremote[point].index = p;
637:       iremote[point].rank = rank;
638:       depthIdx[d]++;
639:     }
640:   }

642:   /* Add in the remote roots for currently shared points */
643:   DMGetPointSF(dm, &pointSF);
644:   PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
645:   for (d=0; d<dim+1; d++) {
646:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
647:     for (p=0; p<numSharedPoints; p++) {
648:       if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
649:         point = sharedLocal[p] + depthShift[d];
650:         iremote[point].index = sharedRemote[p].index;
651:         iremote[point].rank = sharedRemote[p].rank;
652:       }
653:     }
654:   }

656:   /* Now add the incoming overlap points */
657:   for (p=0; p<nleaves; p++) {
658:     point = depthIdx[remoteDepths[p]];
659:     ilocal[point] = point;
660:     iremote[point].index = overlapRemote[p].index;
661:     iremote[point].rank = overlapRemote[p].rank;
662:     depthIdx[remoteDepths[p]]++;
663:   }
664:   PetscFree2(pointDepths,remoteDepths);

666:   PetscSFCreate(comm, migrationSF);
667:   PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");
668:   PetscSFSetFromOptions(*migrationSF);
669:   DMPlexGetChart(dm, &pStart, &pEnd);
670:   PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);

672:   PetscFree3(depthRecv, depthShift, depthIdx);
673:   return(0);
674: }

676: /*@
677:   DMPlexStratifyMigrationSF - Rearrange the leaves of a migration SF for stratification.

679:   Input Parameters:
680: + dm          - The DM
681: - sf          - A star forest with non-ordered leaves, usually defining a DM point migration

683:   Output Parameter:
684: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

686:   Level: developer

688: .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
689: @*/
690: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
691: {
692:   MPI_Comm           comm;
693:   PetscMPIInt        rank, size;
694:   PetscInt           d, ldepth, depth, p, pStart, pEnd, nroots, nleaves;
695:   PetscInt          *pointDepths, *remoteDepths, *ilocal;
696:   PetscInt          *depthRecv, *depthShift, *depthIdx;
697:   PetscInt           hybEnd[4];
698:   const PetscSFNode *iremote;
699:   PetscErrorCode     ierr;

703:   PetscObjectGetComm((PetscObject) dm, &comm);
704:   MPI_Comm_rank(comm, &rank);
705:   MPI_Comm_size(comm, &size);
706:   DMPlexGetDepth(dm, &ldepth);
707:   MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
708:   if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);

710:   /* Before building the migration SF we need to know the new stratum offsets */
711:   PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
712:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
713:   DMPlexGetHybridBounds(dm,&hybEnd[depth],&hybEnd[depth-1],&hybEnd[1],&hybEnd[0]);
714:   for (d = 0; d < depth+1; ++d) {
715:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
716:     for (p = pStart; p < pEnd; ++p) {
717:       if (hybEnd[d] >= 0 && p >= hybEnd[d]) { /* put in a separate value for hybrid points */
718:         pointDepths[p] = 2 * d;
719:       } else {
720:         pointDepths[p] = 2 * d + 1;
721:       }
722:     }
723:   }
724:   for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1;
725:   PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);
726:   PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);
727:   /* Count received points in each stratum and compute the internal strata shift */
728:   PetscMalloc3(2*(depth+1), &depthRecv, 2*(depth+1), &depthShift, 2*(depth+1), &depthIdx);
729:   for (d = 0; d < 2*(depth+1); ++d) depthRecv[d] = 0;
730:   for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++;
731:   depthShift[2*depth+1] = 0;
732:   for (d = 0; d < 2*depth+1; ++d) depthShift[d] = depthRecv[2 * depth + 1];
733:   for (d = 0; d < 2*depth; ++d) depthShift[d] += depthRecv[2 * depth];
734:   depthShift[0] += depthRecv[1];
735:   for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[1];
736:   for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[0];
737:   for (d = 2 * depth-1; d > 2; --d) {
738:     PetscInt e;

740:     for (e = d -1; e > 1; --e) depthShift[e] += depthRecv[d];
741:   }
742:   for (d = 0; d < 2*(depth+1); ++d) {depthIdx[d] = 0;}
743:   /* Derive a new local permutation based on stratified indices */
744:   PetscMalloc1(nleaves, &ilocal);
745:   for (p = 0; p < nleaves; ++p) {
746:     const PetscInt dep = remoteDepths[p];

748:     ilocal[p] = depthShift[dep] + depthIdx[dep];
749:     depthIdx[dep]++;
750:   }
751:   PetscSFCreate(comm, migrationSF);
752:   PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");
753:   PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);
754:   PetscFree2(pointDepths,remoteDepths);
755:   PetscFree3(depthRecv, depthShift, depthIdx);
756:   return(0);
757: }

759: /*@
760:   DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

762:   Collective on DM

764:   Input Parameters:
765: + dm - The DMPlex object
766: . pointSF - The PetscSF describing the communication pattern
767: . originalSection - The PetscSection for existing data layout
768: - originalVec - The existing data

770:   Output Parameters:
771: + newSection - The PetscSection describing the new data layout
772: - newVec - The new data

774:   Level: developer

776: .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
777: @*/
778: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
779: {
780:   PetscSF        fieldSF;
781:   PetscInt      *remoteOffsets, fieldSize;
782:   PetscScalar   *originalValues, *newValues;

786:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
787:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

789:   PetscSectionGetStorageSize(newSection, &fieldSize);
790:   VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
791:   VecSetType(newVec,dm->vectype);

793:   VecGetArray(originalVec, &originalValues);
794:   VecGetArray(newVec, &newValues);
795:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
796:   PetscFree(remoteOffsets);
797:   PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);
798:   PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);
799:   PetscSFDestroy(&fieldSF);
800:   VecRestoreArray(newVec, &newValues);
801:   VecRestoreArray(originalVec, &originalValues);
802:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
803:   return(0);
804: }
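
/* Example usage (a minimal sketch, not from the PETSc sources; it mirrors the way
   DMPlexDistributeCoordinates() below uses this routine). newSection and newVec
   must exist before the call; the routine sets the sizes and type of newVec itself:

     PetscSection newSection;
     Vec          newVec;

     PetscSectionCreate(PetscObjectComm((PetscObject) dm), &newSection);
     VecCreate(PETSC_COMM_SELF, &newVec);
     DMPlexDistributeField(dm, migrationSF, originalSection, originalVec, newSection, newVec);
*/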

806: /*@
807:   DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

809:   Collective on DM

811:   Input Parameters:
812: + dm - The DMPlex object
813: . pointSF - The PetscSF describing the communication pattern
814: . originalSection - The PetscSection for existing data layout
815: - originalIS - The existing data

817:   Output Parameters:
818: + newSection - The PetscSection describing the new data layout
819: - newIS - The new data

821:   Level: developer

823: .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
824: @*/
825: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
826: {
827:   PetscSF         fieldSF;
828:   PetscInt       *newValues, *remoteOffsets, fieldSize;
829:   const PetscInt *originalValues;
830:   PetscErrorCode  ierr;

833:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
834:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

836:   PetscSectionGetStorageSize(newSection, &fieldSize);
837:   PetscMalloc1(fieldSize, &newValues);

839:   ISGetIndices(originalIS, &originalValues);
840:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
841:   PetscFree(remoteOffsets);
842:   PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
843:   PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
844:   PetscSFDestroy(&fieldSF);
845:   ISRestoreIndices(originalIS, &originalValues);
846:   ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
847:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
848:   return(0);
849: }

851: /*@
852:   DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

854:   Collective on DM

856:   Input Parameters:
857: + dm - The DMPlex object
858: . pointSF - The PetscSF describing the communication pattern
859: . originalSection - The PetscSection for existing data layout
860: . datatype - The type of data
861: - originalData - The existing data

863:   Output Parameters:
864: + newSection - The PetscSection describing the new data layout
865: - newData - The new data

867:   Level: developer

869: .seealso: DMPlexDistribute(), DMPlexDistributeField()
870: @*/
871: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
872: {
873:   PetscSF        fieldSF;
874:   PetscInt      *remoteOffsets, fieldSize;
875:   PetscMPIInt    dataSize;

879:   PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);
880:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

882:   PetscSectionGetStorageSize(newSection, &fieldSize);
883:   MPI_Type_size(datatype, &dataSize);
884:   PetscMalloc(fieldSize * dataSize, newData);

886:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
887:   PetscFree(remoteOffsets);
888:   PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);
889:   PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);
890:   PetscSFDestroy(&fieldSF);
891:   PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);
892:   return(0);
893: }
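
/* Example usage (a minimal sketch, not from the PETSc sources), distributing an
   integer array origData laid out by originalSection; MPIU_INT is the MPI datatype
   corresponding to PetscInt, and the routine allocates *newData with PetscMalloc():

     PetscSection newSection;
     PetscInt    *newData;

     PetscSectionCreate(PetscObjectComm((PetscObject) dm), &newSection);
     DMPlexDistributeData(dm, migrationSF, originalSection, MPIU_INT, origData, newSection, (void **) &newData);
     // ... use newData ...
     PetscFree(newData);
*/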

895: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
896: {
897:   DM_Plex               *pmesh = (DM_Plex*) (dmParallel)->data;
898:   MPI_Comm               comm;
899:   PetscSF                coneSF;
900:   PetscSection           originalConeSection, newConeSection;
901:   PetscInt              *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
902:   PetscBool              flg;
903:   PetscErrorCode         ierr;


909:   PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);
910:   /* Distribute cone section */
911:   PetscObjectGetComm((PetscObject)dm, &comm);
912:   DMPlexGetConeSection(dm, &originalConeSection);
913:   DMPlexGetConeSection(dmParallel, &newConeSection);
914:   PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
915:   DMSetUp(dmParallel);
916:   {
917:     PetscInt pStart, pEnd, p;

919:     PetscSectionGetChart(newConeSection, &pStart, &pEnd);
920:     for (p = pStart; p < pEnd; ++p) {
921:       PetscInt coneSize;
922:       PetscSectionGetDof(newConeSection, p, &coneSize);
923:       pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
924:     }
925:   }
926:   /* Communicate and renumber cones */
927:   PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
928:   PetscFree(remoteOffsets);
929:   DMPlexGetCones(dm, &cones);
930:   if (original) {
931:     PetscInt numCones;

933:     PetscSectionGetStorageSize(originalConeSection,&numCones);
934:     PetscMalloc1(numCones,&globCones);
935:     ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);
936:   } else {
937:     globCones = cones;
938:   }
939:   DMPlexGetCones(dmParallel, &newCones);
940:   PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);
941:   PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);
942:   if (original) {
943:     PetscFree(globCones);
944:   }
945:   PetscSectionGetStorageSize(newConeSection, &newConesSize);
946:   ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
947: #if defined(PETSC_USE_DEBUG)
948:   {
949:     PetscInt  p;
950:     PetscBool valid = PETSC_TRUE;
951:     for (p = 0; p < newConesSize; ++p) {
952:       if (newCones[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "[%d] Point %D not in overlap SF\n", PetscGlobalRank,p);}
953:     }
954:     if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
955:   }
956: #endif
957:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);
958:   if (flg) {
959:     PetscPrintf(comm, "Serial Cone Section:\n");
960:     PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);
961:     PetscPrintf(comm, "Parallel Cone Section:\n");
962:     PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);
963:     PetscSFView(coneSF, NULL);
964:   }
965:   DMPlexGetConeOrientations(dm, &cones);
966:   DMPlexGetConeOrientations(dmParallel, &newCones);
967:   PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);
968:   PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);
969:   PetscSFDestroy(&coneSF);
970:   PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);
971:   /* Create supports and stratify DMPlex */
972:   {
973:     PetscInt pStart, pEnd;

975:     PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
976:     PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
977:   }
978:   DMPlexSymmetrize(dmParallel);
979:   DMPlexStratify(dmParallel);
980:   {
981:     PetscBool useCone, useClosure, useAnchors;

983:     DMGetBasicAdjacency(dm, &useCone, &useClosure);
984:     DMSetBasicAdjacency(dmParallel, useCone, useClosure);
985:     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
986:     DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
987:   }
988:   return(0);
989: }

991: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
992: {
993:   MPI_Comm         comm;
994:   PetscSection     originalCoordSection, newCoordSection;
995:   Vec              originalCoordinates, newCoordinates;
996:   PetscInt         bs;
997:   PetscBool        isper;
998:   const char      *name;
999:   const PetscReal *maxCell, *L;
1000:   const DMBoundaryType *bd;
1001:   PetscErrorCode   ierr;


1007:   PetscObjectGetComm((PetscObject)dm, &comm);
1008:   DMGetCoordinateSection(dm, &originalCoordSection);
1009:   DMGetCoordinateSection(dmParallel, &newCoordSection);
1010:   DMGetCoordinatesLocal(dm, &originalCoordinates);
1011:   if (originalCoordinates) {
1012:     VecCreate(PETSC_COMM_SELF, &newCoordinates);
1013:     PetscObjectGetName((PetscObject) originalCoordinates, &name);
1014:     PetscObjectSetName((PetscObject) newCoordinates, name);

1016:     DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1017:     DMSetCoordinatesLocal(dmParallel, newCoordinates);
1018:     VecGetBlockSize(originalCoordinates, &bs);
1019:     VecSetBlockSize(newCoordinates, bs);
1020:     VecDestroy(&newCoordinates);
1021:   }
1022:   DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd);
1023:   DMSetPeriodicity(dmParallel, isper, maxCell, L, bd);
1024:   return(0);
1025: }

1027: /* Here we are assuming that process 0 always has everything */
1028: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1029: {
1030:   DM_Plex         *mesh = (DM_Plex*) dm->data;
1031:   MPI_Comm         comm;
1032:   DMLabel          depthLabel;
1033:   PetscMPIInt      rank;
1034:   PetscInt         depth, d, numLabels, numLocalLabels, l;
1035:   PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1036:   PetscObjectState depthState = -1;
1037:   PetscErrorCode   ierr;


1043:   PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);
1044:   PetscObjectGetComm((PetscObject)dm, &comm);
1045:   MPI_Comm_rank(comm, &rank);

1047:   /* If the user has changed the depth label, communicate it instead */
1048:   DMPlexGetDepth(dm, &depth);
1049:   DMPlexGetDepthLabel(dm, &depthLabel);
1050:   if (depthLabel) {PetscObjectStateGet((PetscObject) depthLabel, &depthState);}
1051:   lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1052:   MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);
1053:   if (sendDepth) {
1054:     DMRemoveLabel(dmParallel, "depth", &depthLabel);
1055:     DMLabelDestroy(&depthLabel);
1056:   }
1057:   /* Everyone must have either the same number of labels, or none */
1058:   DMGetNumLabels(dm, &numLocalLabels);
1059:   numLabels = numLocalLabels;
1060:   MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1061:   if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1062:   for (l = numLabels-1; l >= 0; --l) {
1063:     DMLabel     label = NULL, labelNew = NULL;
1064:     PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;
1065:     const char *name = NULL;

1067:     if (hasLabels) {
1068:       DMGetLabelByNum(dm, l, &label);
1069:       /* Skip "depth" because it is recreated */
1070:       PetscObjectGetName((PetscObject) label, &name);
1071:       PetscStrcmp(name, "depth", &isDepth);
1072:     }
1073:     MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm);
1074:     if (isDepth && !sendDepth) continue;
1075:     DMLabelDistribute(label, migrationSF, &labelNew);
1076:     if (isDepth) {
1077:       /* Put in any missing strata which can occur if users are managing the depth label themselves */
1078:       PetscInt gdepth;

1080:       MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);
1081:       if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
1082:       for (d = 0; d <= gdepth; ++d) {
1083:         PetscBool has;

1085:         DMLabelHasStratum(labelNew, d, &has);
1086:         if (!has) {DMLabelAddStratum(labelNew, d);}
1087:       }
1088:     }
1089:     DMAddLabel(dmParallel, labelNew);
1090:     /* Put the output flag in the new label */
1091:     if (hasLabels) {DMGetLabelOutput(dm, name, &lisOutput);}
1092:     MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm);
1093:     PetscObjectGetName((PetscObject) labelNew, &name);
1094:     DMSetLabelOutput(dmParallel, name, isOutput);
1095:   }
1096:   PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);
1097:   return(0);
1098: }

1100: static PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
1101: {
1102:   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1103:   DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
1104:   PetscBool      *isHybrid, *isHybridParallel;
1105:   PetscInt        dim, depth, d;
1106:   PetscInt        pStart, pEnd, pStartP, pEndP;
1107:   PetscErrorCode  ierr;


1113:   DMGetDimension(dm, &dim);
1114:   DMPlexGetDepth(dm, &depth);
1115:   DMPlexGetChart(dm,&pStart,&pEnd);
1116:   DMPlexGetChart(dmParallel,&pStartP,&pEndP);
1117:   PetscCalloc2(pEnd-pStart,&isHybrid,pEndP-pStartP,&isHybridParallel);
1118:   for (d = 0; d <= depth; d++) {
1119:     PetscInt hybridMax = (depth == 1 && d == 1) ? mesh->hybridPointMax[dim] : mesh->hybridPointMax[d];

1121:     if (hybridMax >= 0) {
1122:       PetscInt sStart, sEnd, p;

1124:       DMPlexGetDepthStratum(dm,d,&sStart,&sEnd);
1125:       for (p = hybridMax; p < sEnd; p++) isHybrid[p-pStart] = PETSC_TRUE;
1126:     }
1127:   }
1128:   PetscSFBcastBegin(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);
1129:   PetscSFBcastEnd(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);
1130:   for (d = 0; d <= dim; d++) pmesh->hybridPointMax[d] = -1;
1131:   for (d = 0; d <= depth; d++) {
1132:     PetscInt sStart, sEnd, p, dd;

1134:     DMPlexGetDepthStratum(dmParallel,d,&sStart,&sEnd);
1135:     dd = (depth == 1 && d == 1) ? dim : d;
1136:     for (p = sStart; p < sEnd; p++) {
1137:       if (isHybridParallel[p-pStartP]) {
1138:         pmesh->hybridPointMax[dd] = p;
1139:         break;
1140:       }
1141:     }
1142:   }
1143:   PetscFree2(isHybrid,isHybridParallel);
1144:   return(0);
1145: }

1147: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1148: {
1149:   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1150:   DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
1151:   MPI_Comm        comm;
1152:   DM              refTree;
1153:   PetscSection    origParentSection, newParentSection;
1154:   PetscInt        *origParents, *origChildIDs;
1155:   PetscBool       flg;
1156:   PetscErrorCode  ierr;

1161:   PetscObjectGetComm((PetscObject)dm, &comm);

1163:   /* Set up tree */
1164:   DMPlexGetReferenceTree(dm,&refTree);
1165:   DMPlexSetReferenceTree(dmParallel,refTree);
1166:   DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);
1167:   if (origParentSection) {
1168:     PetscInt        pStart, pEnd;
1169:     PetscInt        *newParents, *newChildIDs, *globParents;
1170:     PetscInt        *remoteOffsetsParents, newParentSize;
1171:     PetscSF         parentSF;

1173:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1174:     PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);
1175:     PetscSectionSetChart(newParentSection,pStart,pEnd);
1176:     PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1177:     PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1178:     PetscFree(remoteOffsetsParents);
1179:     PetscSectionGetStorageSize(newParentSection,&newParentSize);
1180:     PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);
1181:     if (original) {
1182:       PetscInt numParents;

1184:       PetscSectionGetStorageSize(origParentSection,&numParents);
1185:       PetscMalloc1(numParents,&globParents);
1186:       ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);
1187:     }
1188:     else {
1189:       globParents = origParents;
1190:     }
1191:     PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);
1192:     PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);
1193:     if (original) {
1194:       PetscFree(globParents);
1195:     }
1196:     PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1197:     PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1198:     ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1199: #if defined(PETSC_USE_DEBUG)
1200:     {
1201:       PetscInt  p;
1202:       PetscBool valid = PETSC_TRUE;
1203:       for (p = 0; p < newParentSize; ++p) {
1204:         if (newParents[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1205:       }
1206:       if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1207:     }
1208: #endif
1209:     PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);
1210:     if (flg) {
1211:       PetscPrintf(comm, "Serial Parent Section: \n");
1212:       PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_WORLD);
1213:       PetscPrintf(comm, "Parallel Parent Section: \n");
1214:       PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_WORLD);
1215:       PetscSFView(parentSF, NULL);
1216:     }
1217:     DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);
1218:     PetscSectionDestroy(&newParentSection);
1219:     PetscFree2(newParents,newChildIDs);
1220:     PetscSFDestroy(&parentSF);
1221:   }
1222:   pmesh->useAnchors = mesh->useAnchors;
1223:   return(0);
1224: }

1226: PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1227: {
1228:   PetscMPIInt            rank, size;
1229:   MPI_Comm               comm;
1230:   PetscErrorCode         ierr;


1236:   /* Create point SF for parallel mesh */
1237:   PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);
1238:   PetscObjectGetComm((PetscObject)dm, &comm);
1239:   MPI_Comm_rank(comm, &rank);
1240:   MPI_Comm_size(comm, &size);
1241:   {
1242:     const PetscInt *leaves;
1243:     PetscSFNode    *remotePoints, *rowners, *lowners;
1244:     PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1245:     PetscInt        pStart, pEnd;

1247:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1248:     PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1249:     PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);
1250:     for (p=0; p<numRoots; p++) {
1251:       rowners[p].rank  = -1;
1252:       rowners[p].index = -1;
1253:     }
1254:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1255:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1256:     for (p = 0; p < numLeaves; ++p) {
1257:       if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1258:         lowners[p].rank  = rank;
1259:         lowners[p].index = leaves ? leaves[p] : p;
1260:       } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
1261:         lowners[p].rank  = -2;
1262:         lowners[p].index = -2;
1263:       }
1264:     }
1265:     for (p=0; p<numRoots; p++) { /* Root must not participate in the reduction, flag so that MAXLOC does not use it */
1266:       rowners[p].rank  = -3;
1267:       rowners[p].index = -3;
1268:     }
1269:     PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1270:     PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1271:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1272:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1273:     for (p = 0; p < numLeaves; ++p) {
1274:       if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
1275:       if (lowners[p].rank != rank) ++numGhostPoints;
1276:     }
1277:     PetscMalloc1(numGhostPoints, &ghostPoints);
1278:     PetscMalloc1(numGhostPoints, &remotePoints);
1279:     for (p = 0, gp = 0; p < numLeaves; ++p) {
1280:       if (lowners[p].rank != rank) {
1281:         ghostPoints[gp]        = leaves ? leaves[p] : p;
1282:         remotePoints[gp].rank  = lowners[p].rank;
1283:         remotePoints[gp].index = lowners[p].index;
1284:         ++gp;
1285:       }
1286:     }
1287:     PetscFree2(rowners,lowners);
1288:     PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1289:     PetscSFSetFromOptions((dmParallel)->sf);
1290:   }
1291:   {
1292:     PetscBool useCone, useClosure, useAnchors;

1294:     DMGetBasicAdjacency(dm, &useCone, &useClosure);
1295:     DMSetBasicAdjacency(dmParallel, useCone, useClosure);
1296:     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1297:     DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1298:   }
1299:   PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);
1300:   return(0);
1301: }

1303: /*@
1304:   DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?

1306:   Input Parameters:
1307: + dm - The DMPlex object
1308: - flg - Balance the partition?

1310:   Level: intermediate

1312: .seealso: DMPlexDistribute(), DMPlexGetPartitionBalance()
1313: @*/
1314: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1315: {
1316:   DM_Plex *mesh = (DM_Plex *)dm->data;

1319:   mesh->partitionBalance = flg;
1320:   return(0);
1321: }

1323: /*@
1324:   DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?

1326:   Input Parameter:
1327: . dm - The DMPlex object

1329:   Output Parameter:
1330: . flg - Balance the partition?

1332:   Level: intermediate

1334: .seealso: DMPlexDistribute(), DMPlexSetPartitionBalance()
1335: @*/
1336: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1337: {
1338:   DM_Plex *mesh = (DM_Plex *)dm->data;

1343:   *flg = mesh->partitionBalance;
1344:   return(0);
1345: }
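
/* Example usage (a minimal sketch, not from the PETSc sources). The flag is read
   while the point SF is built during distribution, so set it before calling
   DMPlexDistribute():

     DM dmDist = NULL;

     DMPlexSetPartitionBalance(dm, PETSC_TRUE);
     DMPlexDistribute(dm, 0, NULL, &dmDist);
*/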

1347: /*@C
1348:   DMPlexCreatePointSF - Build a point SF from an SF describing a point migration

1350:   Input Parameters:
1351: + dm          - The source DMPlex object
1352: . migrationSF - The star forest that describes the parallel point remapping
1353: - ownership   - Flag causing a vote to determine point ownership

1355:   Output Parameter:
1356: . pointSF     - The star forest describing the point overlap in the remapped DM

1358:   Level: developer

1360: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1361: @*/
1362: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1363: {
1364:   PetscMPIInt        rank, size;
1365:   PetscInt           p, nroots, nleaves, idx, npointLeaves;
1366:   PetscInt          *pointLocal;
1367:   const PetscInt    *leaves;
1368:   const PetscSFNode *roots;
1369:   PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
1370:   Vec                shifts;
1371:   const PetscInt     numShifts = 13759;
1372:   const PetscScalar *shift = NULL;
1373:   const PetscBool    shiftDebug = PETSC_FALSE;
1374:   PetscBool          balance;
1375:   PetscErrorCode     ierr;

1379:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
1380:   MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);

1382:   DMPlexGetPartitionBalance(dm, &balance);
1383:   PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1384:   PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1385:   if (ownership) {
1386:     /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1387:     if (balance) {
1388:       PetscRandom r;

1390:       PetscRandomCreate(PETSC_COMM_SELF, &r);
1391:       PetscRandomSetInterval(r, 0, 2467*size);
1392:       VecCreate(PETSC_COMM_SELF, &shifts);
1393:       VecSetSizes(shifts, numShifts, numShifts);
1394:       VecSetType(shifts, VECSTANDARD);
1395:       VecSetRandom(shifts, r);
1396:       PetscRandomDestroy(&r);
1397:       VecGetArrayRead(shifts, &shift);
1398:     }

1400:     /* Point ownership vote: Process with highest rank owns shared points */
1401:     for (p = 0; p < nleaves; ++p) {
1402:       if (shiftDebug) {
1403:         PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size);
1404:       }
1405:       /* Either put in a bid or we know we own it */
1406:       leafNodes[p].rank  = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
1407:       leafNodes[p].index = p;
1408:     }
1409:     for (p = 0; p < nroots; p++) {
1410:       /* Root must not participate in the reduction, flag so that MAXLOC does not use it */
1411:       rootNodes[p].rank  = -3;
1412:       rootNodes[p].index = -3;
1413:     }
1414:     PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1415:     PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1416:     if (balance) {
1417:       /* We've voted, but we still need the owner rank. When balancing the partition, the "rank" in rootNodes is not
1418:        * the rank but rather (rank + random)%size. So we do another reduction, voting the same way, but sending the
1419:        * true rank instead of the index. */
1420:       PetscSFNode *rootRanks = NULL;
1421:       PetscMalloc1(nroots, &rootRanks);
1422:       for (p = 0; p < nroots; p++) {
1423:         rootRanks[p].rank = -3;
1424:         rootRanks[p].index = -3;
1425:       }
1426:       for (p = 0; p < nleaves; p++) leafNodes[p].index = rank;
1427:       PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootRanks, MPI_MAXLOC);
1428:       PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootRanks, MPI_MAXLOC);
1429:       for (p = 0; p < nroots; p++) rootNodes[p].rank = rootRanks[p].index;
1430:       PetscFree(rootRanks);
1431:     }
1432:   } else {
1433:     for (p = 0; p < nroots; p++) {
1434:       rootNodes[p].index = -1;
1435:       rootNodes[p].rank = rank;
1436:     }
1437:     for (p = 0; p < nleaves; p++) {
1438:       /* Write new local id into old location */
1439:       if (roots[p].rank == rank) {
1440:         rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1441:       }
1442:     }
1443:   }
1444:   PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1445:   PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);
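  /* After the broadcast each leaf holds the (owner rank, owner-local point number) of its point; the loop
     below keeps only the leaves owned by another rank, which become the leaves of the new point SF. */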

1447:   for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1448:     if (leafNodes[p].rank != rank) npointLeaves++;
1449:   }
1450:   PetscMalloc1(npointLeaves, &pointLocal);
1451:   PetscMalloc1(npointLeaves, &pointRemote);
1452:   for (idx = 0, p = 0; p < nleaves; p++) {
1453:     if (leafNodes[p].rank != rank) {
1454:       pointLocal[idx] = p;
1455:       pointRemote[idx] = leafNodes[p];
1456:       idx++;
1457:     }
1458:   }
1459:   if (shift) {
1460:     VecRestoreArrayRead(shifts, &shift);
1461:     VecDestroy(&shifts);
1462:   }
1463:   if (shiftDebug) {PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT);}
1464:   PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);
1465:   PetscSFSetFromOptions(*pointSF);
1466:   PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1467:   PetscFree2(rootNodes, leafNodes);
1468:   return(0);
1469: }
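
/* Example (a minimal sketch, not from the library): typical use of DMPlexCreatePointSF() after a
   migration, mirroring what DMPlexDistribute() does further below. The names dmNew and sfMigration
   are illustrative placeholders.

     PetscSF sfPoint;

     DMPlexCreatePointSF(dmNew, sfMigration, PETSC_TRUE, &sfPoint);
     DMSetPointSF(dmNew, sfPoint);
     PetscSFDestroy(&sfPoint);

   Passing PETSC_TRUE for ownership triggers the MAXLOC vote above; passing PETSC_FALSE keeps
   ownership with the rank that held each point before the migration. */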

1471: /*@C
1472:   DMPlexMigrate - Migrates internal DM data over the supplied star forest
1473:
1474:   Collective on DM and PetscSF

1476:   Input Parameters:
1477: + dm       - The source DMPlex object
1478: - sf       - The star forest communication context describing the migration pattern

1480:   Output Parameter:
1481: . targetDM - The target DMPlex object

1483:   Level: intermediate

1485: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1486: @*/
1487: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1488: {
1489:   MPI_Comm               comm;
1490:   PetscInt               dim, cdim, nroots;
1491:   PetscSF                sfPoint;
1492:   ISLocalToGlobalMapping ltogMigration;
1493:   ISLocalToGlobalMapping ltogOriginal = NULL;
1494:   PetscBool              flg;
1495:   PetscErrorCode         ierr;

1499:   PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1500:   PetscObjectGetComm((PetscObject) dm, &comm);
1501:   DMGetDimension(dm, &dim);
1502:   DMSetDimension(targetDM, dim);
1503:   DMGetCoordinateDim(dm, &cdim);
1504:   DMSetCoordinateDim(targetDM, cdim);

1506:   /* Check for a one-to-all distribution pattern */
1507:   DMGetPointSF(dm, &sfPoint);
1508:   PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1509:   if (nroots >= 0) {
1510:     IS        isOriginal;
1511:     PetscInt  n, size, nleaves;
1512:     PetscInt  *numbering_orig, *numbering_new;

1514:     /* Get the original point numbering */
1515:     DMPlexCreatePointNumbering(dm, &isOriginal);
1516:     ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal);
1517:     ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1518:     ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1519:     /* Convert to positive global numbers */
1520:     for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1521:     /* Derive the new local-to-global mapping from the old one */
1522:     PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1523:     PetscMalloc1(nleaves, &numbering_new);
1524:     PetscSFBcastBegin(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1525:     PetscSFBcastEnd(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1526:     ISLocalToGlobalMappingCreate(comm, 1, nleaves, (const PetscInt*) numbering_new, PETSC_OWN_POINTER, &ltogMigration);
1527:     ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1528:     ISDestroy(&isOriginal);
1529:   } else {
1530:     /* One-to-all distribution pattern: We can derive LToG from SF */
1531:     ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration);
1532:   }
1533:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1534:   if (flg) {
1535:     PetscPrintf(comm, "Point renumbering for DM migration:\n");
1536:     ISLocalToGlobalMappingView(ltogMigration, NULL);
1537:   }
1538:   /* Migrate DM data to target DM */
1539:   DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);
1540:   DMPlexDistributeLabels(dm, sf, targetDM);
1541:   DMPlexDistributeCoordinates(dm, sf, targetDM);
1542:   DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);
1543:   DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);
1544:   ISLocalToGlobalMappingDestroy(&ltogOriginal);
1545:   ISLocalToGlobalMappingDestroy(&ltogMigration);
1546:   PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1547:   return(0);
1548: }
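
/* Example (a minimal sketch, not from the library): migrate a mesh into a freshly created DM over a
   previously built migration SF and attach the derived point SF, following the same sequence used by
   DMPlexDistribute() below. The names dmNew and sfMigration are illustrative placeholders.

     DM      dmNew;
     PetscSF sfPoint;

     DMPlexCreate(comm, &dmNew);
     DMPlexMigrate(dm, sfMigration, dmNew);
     DMPlexCreatePointSF(dmNew, sfMigration, PETSC_TRUE, &sfPoint);
     DMSetPointSF(dmNew, sfPoint);
     PetscSFDestroy(&sfPoint);
*/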

1550: PETSC_INTERN PetscErrorCode DMPlexPartitionLabelClosure_Private(DM,DMLabel,PetscInt,PetscInt,const PetscInt[],IS*);

1552: /*@C
1553:   DMPlexDistribute - Distributes the mesh and any associated sections.

1555:   Collective on DM

1557:   Input Parameters:
1558: + dm  - The original DMPlex object
1559: - overlap - The overlap of partitions, 0 is the default

1561:   Output Parameters:
1562: + sf - The PetscSF used for point distribution, or NULL if not needed
1563: - dmParallel - The distributed DMPlex object

1565:   Note: If the mesh was not distributed, the output dmParallel will be NULL.

1567:   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1568:   representation on the mesh.

1570:   Level: intermediate

1572: .keywords: mesh, elements
1573: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMSetAdjacency()
1574: @*/
1575: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1576: {
1577:   MPI_Comm               comm;
1578:   PetscPartitioner       partitioner;
1579:   IS                     cellPart;
1580:   PetscSection           cellPartSection;
1581:   DM                     dmCoord;
1582:   DMLabel                lblPartition, lblMigration;
1583:   PetscSF                sfMigration, sfStratified, sfPoint;
1584:   PetscBool              flg, balance;
1585:   PetscMPIInt            rank, size;
1586:   PetscErrorCode         ierr;


1594:   if (sf) *sf = NULL;
1595:   *dmParallel = NULL;
1596:   PetscObjectGetComm((PetscObject)dm,&comm);
1597:   MPI_Comm_rank(comm, &rank);
1598:   MPI_Comm_size(comm, &size);
1599:   if (size == 1) return(0);

1601:   PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);
1602:   /* Create cell partition */
1603:   PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1604:   PetscSectionCreate(comm, &cellPartSection);
1605:   DMPlexGetPartitioner(dm, &partitioner);
1606:   PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);
1607:   {
1608:     /* Convert partition to DMLabel */
1609:     IS         is;
1610:     PetscHSetI ht;
1611:     PetscInt pStart, pEnd, proc, npoints, poff = 0, nranks, *iranks;
1612:     const PetscInt *points;

1614:     DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition);
1615:     /* Preallocate strata */
1616:     PetscHSetICreate(&ht);
1617:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1618:     for (proc = pStart; proc < pEnd; proc++) {
1619:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1620:       if (npoints) {PetscHSetIAdd(ht, proc);}
1621:     }
1622:     PetscHSetIGetSize(ht, &nranks);
1623:     PetscMalloc1(nranks, &iranks);
1624:     PetscHSetIGetElems(ht, &poff, iranks);
1625:     PetscHSetIDestroy(&ht);
1626:     DMLabelAddStrata(lblPartition, nranks, iranks);
1627:     PetscFree(iranks);
1628:     /* Inline DMPlexPartitionLabelClosure() */
1629:     ISGetIndices(cellPart, &points);
1630:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1631:     for (proc = pStart; proc < pEnd; proc++) {
1632:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1633:       if (!npoints) continue;
1634:       PetscSectionGetOffset(cellPartSection, proc, &poff);
1635:       DMPlexPartitionLabelClosure_Private(dm, lblPartition, proc, npoints, points+poff, &is);
1636:       DMLabelSetStratumIS(lblPartition, proc, is);
1637:       ISDestroy(&is);
1638:     }
1639:     ISRestoreIndices(cellPart, &points);
1640:   }
1641:   DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration);
1642:   DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration);
1643:   DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1644:   /* Stratify the SF in case we are migrating an already parallel plex */
1645:   DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1646:   PetscSFDestroy(&sfMigration);
1647:   sfMigration = sfStratified;
1648:   PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);
1649:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1650:   if (flg) {
1651:     DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_WORLD);
1652:     PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_WORLD);
1653:   }

1655:   /* Create non-overlapping parallel DM and migrate internal data */
1656:   DMPlexCreate(comm, dmParallel);
1657:   PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");
1658:   DMPlexMigrate(dm, sfMigration, *dmParallel);

1660:   /* Build the point SF without overlap */
1661:   DMPlexGetPartitionBalance(dm, &balance);
1662:   DMPlexSetPartitionBalance(*dmParallel, balance);
1663:   DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1664:   DMSetPointSF(*dmParallel, sfPoint);
1665:   DMGetCoordinateDM(*dmParallel, &dmCoord);
1666:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1667:   if (flg) {PetscSFView(sfPoint, PETSC_VIEWER_STDOUT_WORLD);}

1669:   if (overlap > 0) {
1670:     DM                 dmOverlap;
1671:     PetscInt           nroots, nleaves;
1672:     PetscSFNode       *newRemote;
1673:     const PetscSFNode *oldRemote;
1674:     PetscSF            sfOverlap, sfOverlapPoint;
1675:     /* Add the partition overlap to the distributed DM */
1676:     DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1677:     DMDestroy(dmParallel);
1678:     *dmParallel = dmOverlap;
1679:     if (flg) {
1680:       PetscPrintf(comm, "Overlap Migration SF:\n");
1681:       PetscSFView(sfOverlap, NULL);
1682:     }

1684:     /* Re-map the migration SF to establish the full migration pattern */
1685:     PetscSFGetGraph(sfMigration, &nroots, NULL, NULL, &oldRemote);
1686:     PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1687:     PetscMalloc1(nleaves, &newRemote);
1688:     PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1689:     PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1690:     PetscSFCreate(comm, &sfOverlapPoint);
1691:     PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1692:     PetscSFDestroy(&sfOverlap);
1693:     PetscSFDestroy(&sfMigration);
1694:     sfMigration = sfOverlapPoint;
1695:   }
1696:   /* Cleanup Partition */
1697:   DMLabelDestroy(&lblPartition);
1698:   DMLabelDestroy(&lblMigration);
1699:   PetscSectionDestroy(&cellPartSection);
1700:   ISDestroy(&cellPart);
1701:   /* Copy BC */
1702:   DMCopyBoundary(dm, *dmParallel);
1703:   /* Create sfNatural */
1704:   if (dm->useNatural) {
1705:     PetscSection section;

1707:     DMGetSection(dm, &section);
1708:     DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);
1709:     DMSetUseNatural(*dmParallel, PETSC_TRUE);
1710:   }
1711:   /* Cleanup */
1712:   if (sf) {*sf = sfMigration;}
1713:   else    {PetscSFDestroy(&sfMigration);}
1714:   PetscSFDestroy(&sfPoint);
1715:   PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1716:   return(0);
1717: }
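
/* Example (a minimal sketch): distribute a mesh with no overlap and swap in the parallel DM if one
   was produced; on a single process dmDist stays NULL and dm is kept as-is.

     DM      dmDist = NULL;
     PetscSF sf     = NULL;

     DMPlexDistribute(dm, 0, &sf, &dmDist);
     if (dmDist) {
       DMDestroy(&dm);
       dm = dmDist;
     }
     PetscSFDestroy(&sf);
*/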

1719: /*@C
1720:   DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.

1722:   Collective on DM

1724:   Input Parameters:
1725: + dm  - The non-overlapping distributed DMPlex object
1726: - overlap - The overlap of partitions

1728:   Output Parameters:
1729: + sf - The PetscSF used for point distribution
1730: - dmOverlap - The overlapping distributed DMPlex object, or NULL

1732:   Note: If the mesh was not distributed, the output dmOverlap will be NULL.

1734:   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1735:   representation on the mesh.

1737:   Level: intermediate

1739: .keywords: mesh, elements
1740: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMSetAdjacency()
1741: @*/
1742: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1743: {
1744:   MPI_Comm               comm;
1745:   PetscMPIInt            size, rank;
1746:   PetscSection           rootSection, leafSection;
1747:   IS                     rootrank, leafrank;
1748:   DM                     dmCoord;
1749:   DMLabel                lblOverlap;
1750:   PetscSF                sfOverlap, sfStratified, sfPoint;
1751:   PetscErrorCode         ierr;


1758:   if (sf) *sf = NULL;
1759:   *dmOverlap  = NULL;
1760:   PetscObjectGetComm((PetscObject)dm,&comm);
1761:   MPI_Comm_size(comm, &size);
1762:   MPI_Comm_rank(comm, &rank);
1763:   if (size == 1) return(0);

1765:   PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1766:   /* Compute point overlap with neighbouring processes on the distributed DM */
1767:   PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1768:   PetscSectionCreate(comm, &rootSection);
1769:   PetscSectionCreate(comm, &leafSection);
1770:   DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
1771:   DMPlexCreateOverlap(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
1772:   /* Convert overlap label to stratified migration SF */
1773:   DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
1774:   DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
1775:   PetscSFDestroy(&sfOverlap);
1776:   sfOverlap = sfStratified;
1777:   PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");
1778:   PetscSFSetFromOptions(sfOverlap);

1780:   PetscSectionDestroy(&rootSection);
1781:   PetscSectionDestroy(&leafSection);
1782:   ISDestroy(&rootrank);
1783:   ISDestroy(&leafrank);
1784:   PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);

1786:   /* Build the overlapping DM */
1787:   DMPlexCreate(comm, dmOverlap);
1788:   PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");
1789:   DMPlexMigrate(dm, sfOverlap, *dmOverlap);
1790:   /* Build the new point SF */
1791:   DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
1792:   DMSetPointSF(*dmOverlap, sfPoint);
1793:   DMGetCoordinateDM(*dmOverlap, &dmCoord);
1794:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1795:   PetscSFDestroy(&sfPoint);
1796:   /* Cleanup overlap partition */
1797:   DMLabelDestroy(&lblOverlap);
1798:   if (sf) *sf = sfOverlap;
1799:   else    {PetscSFDestroy(&sfOverlap);}
1800:   PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1801:   return(0);
1802: }
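
/* Example (a minimal sketch): add one level of overlap to an already distributed, non-overlapping
   mesh, discarding the migration SF.

     DM dmOverlap = NULL;

     DMPlexDistributeOverlap(dm, 1, NULL, &dmOverlap);
     if (dmOverlap) {
       DMDestroy(&dm);
       dm = dmOverlap;
     }
*/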

1804: /*@C
1805:   DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1806:   root process of the original's communicator.
1807:   
1808:   Collective on DM

1810:   Input Parameter:
1811: . dm - the original DMPlex object

1813:   Output Parameters:
1814: + sf - the PetscSF used for point distribution (optional)
1815: - gatherMesh - the gathered DM object, or NULL

1817:   Level: intermediate

1819: .keywords: mesh
1820: .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1821: @*/
1822: PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
1823: {
1824:   MPI_Comm       comm;
1825:   PetscMPIInt    size;
1826:   PetscPartitioner oldPart, gatherPart;

1832:   *gatherMesh = NULL;
1833:   if (sf) *sf = NULL;
1834:   comm = PetscObjectComm((PetscObject)dm);
1835:   MPI_Comm_size(comm,&size);
1836:   if (size == 1) return(0);
1837:   DMPlexGetPartitioner(dm,&oldPart);
1838:   PetscObjectReference((PetscObject)oldPart);
1839:   PetscPartitionerCreate(comm,&gatherPart);
1840:   PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);
1841:   DMPlexSetPartitioner(dm,gatherPart);
1842:   DMPlexDistribute(dm,0,sf,gatherMesh);

1844:   DMPlexSetPartitioner(dm,oldPart);
1845:   PetscPartitionerDestroy(&gatherPart);
1846:   PetscPartitionerDestroy(&oldPart);
1847:   return(0);
1848: }
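
/* Example (a minimal sketch): gather the mesh onto the root process, for instance for serial
   post-processing, then release the copy.

     DM gatherDM = NULL;

     DMPlexGetGatherDM(dm, NULL, &gatherDM);
     if (gatherDM) {
       ... work with gatherDM, whose points all live on rank 0 ...
       DMDestroy(&gatherDM);
     }
*/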

1850: /*@C
1851:   DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.
1852:   
1853:   Collective on DM

1855:   Input Parameter:
1856: . dm - the original DMPlex object

1858:   Output Parameters:
1859: + sf - the PetscSF used for point distribution (optional)
1860: - redundantMesh - the redundant DM object, or NULL

1862:   Level: intermediate

1864: .keywords: mesh
1865: .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
1866: @*/
1867: PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
1868: {
1869:   MPI_Comm       comm;
1870:   PetscMPIInt    size, rank;
1871:   PetscInt       pStart, pEnd, p;
1872:   PetscInt       numPoints = -1;
1873:   PetscSF        migrationSF, sfPoint, gatherSF;
1874:   DM             gatherDM, dmCoord;
1875:   PetscSFNode    *points;

1881:   *redundantMesh = NULL;
1882:   comm = PetscObjectComm((PetscObject)dm);
1883:   MPI_Comm_size(comm,&size);
1884:   if (size == 1) {
1885:     PetscObjectReference((PetscObject) dm);
1886:     *redundantMesh = dm;
1887:     if (sf) *sf = NULL;
1888:     return(0);
1889:   }
1890:   DMPlexGetGatherDM(dm,&gatherSF,&gatherDM);
1891:   if (!gatherDM) return(0);
1892:   MPI_Comm_rank(comm,&rank);
1893:   DMPlexGetChart(gatherDM,&pStart,&pEnd);
1894:   numPoints = pEnd - pStart;
1895:   MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);
1896:   PetscMalloc1(numPoints,&points);
1897:   PetscSFCreate(comm,&migrationSF);
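  /* Every rank takes all numPoints points of the gathered mesh as leaves rooted on rank 0,
     so migrating over this SF copies the whole mesh to every process. */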
1898:   for (p = 0; p < numPoints; p++) {
1899:     points[p].index = p;
1900:     points[p].rank  = 0;
1901:   }
1902:   PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);
1903:   DMPlexCreate(comm, redundantMesh);
1904:   PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");
1905:   DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);
1906:   DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);
1907:   DMSetPointSF(*redundantMesh, sfPoint);
1908:   DMGetCoordinateDM(*redundantMesh, &dmCoord);
1909:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1910:   PetscSFDestroy(&sfPoint);
1911:   if (sf) {
1912:     PetscSF tsf;

1914:     PetscSFCompose(gatherSF,migrationSF,&tsf);
1915:     DMPlexStratifyMigrationSF(dm, tsf, sf);
1916:     PetscSFDestroy(&tsf);
1917:   }
1918:   PetscSFDestroy(&migrationSF);
1919:   PetscSFDestroy(&gatherSF);
1920:   DMDestroy(&gatherDM);
1921:   return(0);
1922: }
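
/* Example (a minimal sketch): obtain a full copy of the mesh on every rank, ignoring the
   migration SF.

     DM dmRedundant = NULL;

     DMPlexGetRedundantDM(dm, NULL, &dmRedundant);
     if (dmRedundant) {
       DMDestroy(&dmRedundant);
     }
*/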