Actual source code: matrix.c
petsc-3.9.2 2018-05-20
2: /*
3: This is where the abstract matrix operations are defined
4: */
6: #include <petsc/private/matimpl.h>
7: #include <petsc/private/isimpl.h>
8: #include <petsc/private/vecimpl.h>
10: /* Logging support */
11: PetscClassId MAT_CLASSID;
12: PetscClassId MAT_COLORING_CLASSID;
13: PetscClassId MAT_FDCOLORING_CLASSID;
14: PetscClassId MAT_TRANSPOSECOLORING_CLASSID;
16: PetscLogEvent MAT_Mult, MAT_Mults, MAT_MultConstrained, MAT_MultAdd, MAT_MultTranspose;
17: PetscLogEvent MAT_MultTransposeConstrained, MAT_MultTransposeAdd, MAT_Solve, MAT_Solves, MAT_SolveAdd, MAT_SolveTranspose, MAT_MatSolve;
18: PetscLogEvent MAT_SolveTransposeAdd, MAT_SOR, MAT_ForwardSolve, MAT_BackwardSolve, MAT_LUFactor, MAT_LUFactorSymbolic;
19: PetscLogEvent MAT_LUFactorNumeric, MAT_CholeskyFactor, MAT_CholeskyFactorSymbolic, MAT_CholeskyFactorNumeric, MAT_ILUFactor;
20: PetscLogEvent MAT_ILUFactorSymbolic, MAT_ICCFactorSymbolic, MAT_Copy, MAT_Convert, MAT_Scale, MAT_AssemblyBegin;
21: PetscLogEvent MAT_AssemblyEnd, MAT_SetValues, MAT_GetValues, MAT_GetRow, MAT_GetRowIJ, MAT_CreateSubMats, MAT_GetOrdering, MAT_RedundantMat, MAT_GetSeqNonzeroStructure;
22: PetscLogEvent MAT_IncreaseOverlap, MAT_Partitioning, MAT_Coarsen, MAT_ZeroEntries, MAT_Load, MAT_View, MAT_AXPY, MAT_FDColoringCreate;
23: PetscLogEvent MAT_FDColoringSetUp, MAT_FDColoringApply,MAT_Transpose,MAT_FDColoringFunction, MAT_CreateSubMat;
24: PetscLogEvent MAT_TransposeColoringCreate;
25: PetscLogEvent MAT_MatMult, MAT_MatMultSymbolic, MAT_MatMultNumeric;
26: PetscLogEvent MAT_PtAP, MAT_PtAPSymbolic, MAT_PtAPNumeric,MAT_RARt, MAT_RARtSymbolic, MAT_RARtNumeric;
27: PetscLogEvent MAT_MatTransposeMult, MAT_MatTransposeMultSymbolic, MAT_MatTransposeMultNumeric;
28: PetscLogEvent MAT_TransposeMatMult, MAT_TransposeMatMultSymbolic, MAT_TransposeMatMultNumeric;
29: PetscLogEvent MAT_MatMatMult, MAT_MatMatMultSymbolic, MAT_MatMatMultNumeric;
30: PetscLogEvent MAT_MultHermitianTranspose,MAT_MultHermitianTransposeAdd;
31: PetscLogEvent MAT_Getsymtranspose, MAT_Getsymtransreduced, MAT_Transpose_SeqAIJ, MAT_GetBrowsOfAcols;
32: PetscLogEvent MAT_GetBrowsOfAocols, MAT_Getlocalmat, MAT_Getlocalmatcondensed, MAT_Seqstompi, MAT_Seqstompinum, MAT_Seqstompisym;
33: PetscLogEvent MAT_Applypapt, MAT_Applypapt_numeric, MAT_Applypapt_symbolic, MAT_GetSequentialNonzeroStructure;
34: PetscLogEvent MAT_GetMultiProcBlock;
35: PetscLogEvent MAT_CUSPARSECopyToGPU, MAT_SetValuesBatch, MAT_SetValuesBatchI, MAT_SetValuesBatchII, MAT_SetValuesBatchIII, MAT_SetValuesBatchIV;
36: PetscLogEvent MAT_ViennaCLCopyToGPU;
37: PetscLogEvent MAT_Merge,MAT_Residual,MAT_SetRandom;
38: PetscLogEvent MATCOLORING_Apply,MATCOLORING_Comm,MATCOLORING_Local,MATCOLORING_ISCreate,MATCOLORING_SetUp,MATCOLORING_Weights;
40: const char *const MatFactorTypes[] = {"NONE","LU","CHOLESKY","ILU","ICC","ILUDT","MatFactorType","MAT_FACTOR_",0};
42: /*@
43: MatSetRandom - Sets all components of a matrix to random numbers. For sparse matrices that have been preallocated, it randomly selects appropriate locations.
45: Logically Collective on Mat
47: Input Parameters:
48: + x - the matrix
49: - rctx - the random number context, formed by PetscRandomCreate(), or NULL and
50: it will create one internally.
52: Output Parameter:
53: . x - the matrix
55: Example of Usage:
56: .vb
57: PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
58: MatSetRandom(x,rctx);
59: PetscRandomDestroy(rctx);
60: .ve
62: Level: intermediate
64: Concepts: matrix^setting to random
65: Concepts: random^matrix
67: .seealso: MatZeroEntries(), MatSetValues(), PetscRandomCreate(), PetscRandomDestroy()
68: @*/
69: PetscErrorCode MatSetRandom(Mat x,PetscRandom rctx)
70: {
72: PetscRandom randObj = NULL;
79: if (!rctx) {
80: MPI_Comm comm;
81: PetscObjectGetComm((PetscObject)x,&comm);
82: PetscRandomCreate(comm,&randObj);
83: PetscRandomSetFromOptions(randObj);
84: rctx = randObj;
85: }
87: PetscLogEventBegin(MAT_SetRandom,x,rctx,0,0);
88: (*x->ops->setrandom)(x,rctx);
89: PetscLogEventEnd(MAT_SetRandom,x,rctx,0,0);
91: x->assembled = PETSC_TRUE;
92: PetscRandomDestroy(&randObj);
93: return(0);
94: }
96: /*@
97: MatFactorGetErrorZeroPivot - returns the pivot value that was determined to be zero and the row it occurred in
99: Logically Collective on Mat
101: Input Parameters:
102: . mat - the factored matrix
104: Output Parameter:
105: + pivot - the pivot value computed
106: - row - the row in which the zero pivot occurred. Note that this row must be interpreted carefully due to row reorderings and which processes
107: share the matrix
109: Level: advanced
111: Notes: This routine does not work for factorizations done with external packages.
112: This routine should only be called if MatFactorGetError() returns a value of MAT_FACTOR_NUMERIC_ZEROPIVOT
114: This can also be called on non-factored matrices, for example matrices used in SOR.
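   Example of Usage (an illustrative sketch; fact is a placeholder for a factored matrix obtained with MatGetFactor() and MatLUFactorNumeric()):
.vb
      MatFactorError err;
      MatFactorGetError(fact,&err);
      if (err == MAT_FACTOR_NUMERIC_ZEROPIVOT) {
        PetscReal pivot;
        PetscInt  row;
        MatFactorGetErrorZeroPivot(fact,&pivot,&row);
        PetscPrintf(PETSC_COMM_SELF,"zero pivot %g in row %D\n",(double)pivot,row);
      }
.ve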
116: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorClearError(), MatFactorGetError()
117: @*/
118: PetscErrorCode MatFactorGetErrorZeroPivot(Mat mat,PetscReal *pivot,PetscInt *row)
119: {
122: *pivot = mat->factorerror_zeropivot_value;
123: *row = mat->factorerror_zeropivot_row;
124: return(0);
125: }
127: /*@
128: MatFactorGetError - gets the error code from a factorization
130: Logically Collective on Mat
132: Input Parameters:
133: . mat - the factored matrix
135: Output Parameter:
136: . err - the error code
138: Level: advanced
140: Notes: This can also be called on non-factored matrices, for example matrices used in SOR.
142: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorClearError(), MatFactorGetErrorZeroPivot()
143: @*/
144: PetscErrorCode MatFactorGetError(Mat mat,MatFactorError *err)
145: {
148: *err = mat->factorerrortype;
149: return(0);
150: }
152: /*@
153: MatFactorClearError - clears the error code in a factorization
155: Logically Collective on Mat
157: Input Parameter:
158: . mat - the factored matrix
160: Level: developer
162: Notes: This can also be called on non-factored matrices, for example matrices used in SOR.
164: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorGetError(), MatFactorGetErrorZeroPivot()
165: @*/
166: PetscErrorCode MatFactorClearError(Mat mat)
167: {
170: mat->factorerrortype = MAT_FACTOR_NOERROR;
171: mat->factorerror_zeropivot_value = 0.0;
172: mat->factorerror_zeropivot_row = 0;
173: return(0);
174: }
176: static PetscErrorCode MatFindNonzeroRows_Basic(Mat mat,IS *keptrows)
177: {
178: PetscErrorCode ierr;
179: Vec r,l;
180: const PetscScalar *al;
181: PetscInt i,nz,gnz,N,n;
184: MatGetSize(mat,&N,NULL);
185: MatGetLocalSize(mat,&n,NULL);
186: MatCreateVecs(mat,&r,&l);
187: VecSet(l,0.0);
188: VecSetRandom(r,NULL);
189: MatMult(mat,r,l);
190: VecGetArrayRead(l,&al);
191: for (i=0,nz=0;i<n;i++) if (al[i] != 0.0) nz++;
192: MPIU_Allreduce(&nz,&gnz,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)mat));
193: if (gnz != N) {
194: PetscInt *nzr;
195: PetscMalloc1(nz,&nzr);
196: if (nz) { for (i=0,nz=0;i<n;i++) if (al[i] != 0.0) nzr[nz++] = i; }
197: ISCreateGeneral(PetscObjectComm((PetscObject)mat),nz,nzr,PETSC_OWN_POINTER,keptrows);
198: } else *keptrows = NULL;
199: VecRestoreArrayRead(l,&al);
200: VecDestroy(&l);
201: VecDestroy(&r);
202: return(0);
203: }
205: /*@
206: MatFindNonzeroRows - Locate all rows that are not completely zero in the matrix
208: Input Parameter:
209: . A - the matrix
211: Output Parameter:
212: . keptrows - the rows that are not completely zero
214: Notes: keptrows is set to NULL if all rows are nonzero.
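   Example of Usage (an illustrative sketch; A is a placeholder for an assembled matrix):
.vb
      IS keptrows;
      MatFindNonzeroRows(A,&keptrows);
      if (keptrows) {    /* NULL means every row contains a nonzero */
        ISView(keptrows,PETSC_VIEWER_STDOUT_WORLD);
        ISDestroy(&keptrows);
      }
.ve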
216: Level: intermediate
218: @*/
219: PetscErrorCode MatFindNonzeroRows(Mat mat,IS *keptrows)
220: {
227: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
228: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
229: if (!mat->ops->findnonzerorows) {
230: MatFindNonzeroRows_Basic(mat,keptrows);
231: } else {
232: (*mat->ops->findnonzerorows)(mat,keptrows);
233: }
234: return(0);
235: }
237: /*@
238: MatFindZeroRows - Locate all rows that are completely zero in the matrix
240: Input Parameter:
241: . A - the matrix
243: Output Parameter:
244: . zerorows - the rows that are completely zero
246: Notes: zerorows is set to NULL if no rows are zero.
248: Level: intermediate
250: @*/
251: PetscErrorCode MatFindZeroRows(Mat mat,IS *zerorows)
252: {
254: IS keptrows;
255: PetscInt m, n;
260: MatFindNonzeroRows(mat, &keptrows);
261: /* MatFindNonzeroRows sets keptrows to NULL if there are no zero rows.
262: In keeping with this convention, we set zerorows to NULL if there are no zero
263: rows. */
264: if (keptrows == NULL) {
265: *zerorows = NULL;
266: } else {
267: MatGetOwnershipRange(mat,&m,&n);
268: ISComplement(keptrows,m,n,zerorows);
269: ISDestroy(&keptrows);
270: }
271: return(0);
272: }
274: /*@
275: MatGetDiagonalBlock - Returns the part of the matrix associated with the on-process coupling
277: Not Collective
279: Input Parameters:
280: . A - the matrix
282: Output Parameters:
283: . a - the diagonal part (which is a SEQUENTIAL matrix)
285: Notes: see the manual page for MatCreateAIJ() for more information on the "diagonal part" of the matrix.
286: Use caution, as the reference count on the returned matrix is not incremented and it is used as
287: part of the containing MPI Mat's normal operation.
289: Level: advanced
291: @*/
292: PetscErrorCode MatGetDiagonalBlock(Mat A,Mat *a)
293: {
300: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
301: if (!A->ops->getdiagonalblock) {
302: PetscMPIInt size;
303: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
304: if (size == 1) {
305: *a = A;
306: return(0);
307: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not coded for this matrix type");
308: }
309: (*A->ops->getdiagonalblock)(A,a);
310: return(0);
311: }
313: /*@
314: MatGetTrace - Gets the trace of a matrix, i.e., the sum of the diagonal entries.
316: Collective on Mat
318: Input Parameters:
319: . mat - the matrix
321: Output Parameter:
322: . trace - the sum of the diagonal entries
324: Level: advanced
326: @*/
327: PetscErrorCode MatGetTrace(Mat mat,PetscScalar *trace)
328: {
330: Vec diag;
333: MatCreateVecs(mat,&diag,NULL);
334: MatGetDiagonal(mat,diag);
335: VecSum(diag,trace);
336: VecDestroy(&diag);
337: return(0);
338: }
340: /*@
341: MatRealPart - Zeros out the imaginary part of the matrix
343: Logically Collective on Mat
345: Input Parameters:
346: . mat - the matrix
348: Level: advanced
351: .seealso: MatImaginaryPart()
352: @*/
353: PetscErrorCode MatRealPart(Mat mat)
354: {
360: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
361: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
362: if (!mat->ops->realpart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
363: MatCheckPreallocated(mat,1);
364: (*mat->ops->realpart)(mat);
365: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
366: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
367: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
368: }
369: #endif
370: return(0);
371: }
373: /*@C
374: MatGetGhosts - Get the global index of all ghost nodes defined by the sparse matrix
376: Collective on Mat
378: Input Parameter:
379: . mat - the matrix
381: Output Parameters:
382: + nghosts - number of ghosts (note for BAIJ matrices there is one ghost for each block)
383: - ghosts - the global indices of the ghost points
385: Notes: the nghosts and ghosts are suitable to pass into VecCreateGhost()
387: Level: advanced
389: @*/
390: PetscErrorCode MatGetGhosts(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
391: {
397: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
398: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
399: if (!mat->ops->getghosts) {
400: if (nghosts) *nghosts = 0;
401: if (ghosts) *ghosts = 0;
402: } else {
403: (*mat->ops->getghosts)(mat,nghosts,ghosts);
404: }
405: return(0);
406: }
409: /*@
410: MatImaginaryPart - Moves the imaginary part of the matrix to the real part and zeros the imaginary part
412: Logically Collective on Mat
414: Input Parameters:
415: . mat - the matrix
417: Level: advanced
420: .seealso: MatRealPart()
421: @*/
422: PetscErrorCode MatImaginaryPart(Mat mat)
423: {
429: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
430: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
431: if (!mat->ops->imaginarypart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
432: MatCheckPreallocated(mat,1);
433: (*mat->ops->imaginarypart)(mat);
434: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
435: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
436: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
437: }
438: #endif
439: return(0);
440: }
442: /*@
443: MatMissingDiagonal - Determine if sparse matrix is missing a diagonal entry (or block entry for BAIJ matrices)
445: Not Collective
447: Input Parameter:
448: . mat - the matrix
450: Output Parameters:
451: + missing - is any diagonal missing
452: - dd - first diagonal entry that is missing (optional) on this process
454: Level: advanced
457: .seealso: MatRealPart()
458: @*/
459: PetscErrorCode MatMissingDiagonal(Mat mat,PetscBool *missing,PetscInt *dd)
460: {
466: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
467: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
468: if (!mat->ops->missingdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
469: (*mat->ops->missingdiagonal)(mat,missing,dd);
470: return(0);
471: }
473: /*@C
474: MatGetRow - Gets a row of a matrix. You MUST call MatRestoreRow()
475: for each row that you get to ensure that your application does
476: not bleed memory.
478: Not Collective
480: Input Parameters:
481: + mat - the matrix
482: - row - the row to get
484: Output Parameters:
485: + ncols - if not NULL, the number of nonzeros in the row
486: . cols - if not NULL, the column numbers
487: - vals - if not NULL, the values
489: Notes:
490: This routine is provided for people who need to have direct access
491: to the structure of a matrix. We hope that we provide enough
492: high-level matrix routines that few users will need it.
494: MatGetRow() always returns 0-based column indices, regardless of
495: whether the internal representation is 0-based (default) or 1-based.
497: For better efficiency, set cols and/or vals to NULL if you do
498: not wish to extract these quantities.
500: The user can only examine the values extracted with MatGetRow();
501: the values cannot be altered. To change the matrix entries, one
502: must use MatSetValues().
504: You can only have one call to MatGetRow() outstanding for a particular
505: matrix at a time, per processor. MatGetRow() can only obtain rows
506: associated with the given processor; it cannot get rows from the
507: other processors. For that we suggest using MatCreateSubMatrices(), then
508: MatGetRow() on the submatrix. The row index passed to MatGetRow()
509: is in the global numbering of rows.
511: Fortran Notes:
512: The calling sequence from Fortran is
513: .vb
514: MatGetRow(matrix,row,ncols,cols,values,ierr)
515: Mat matrix (input)
516: integer row (input)
517: integer ncols (output)
518: integer cols(maxcols) (output)
519: double precision (or double complex) values(maxcols) output
520: .ve
521: where maxcols >= maximum nonzeros in any row of the matrix.
524: Caution:
525: Do not try to change the contents of the output arrays (cols and vals).
526: In some cases, this may corrupt the matrix.
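   Example of Usage (an illustrative sketch; A is a placeholder for an assembled matrix):
.vb
      PetscInt          row,rstart,rend,ncols;
      const PetscInt    *cols;
      const PetscScalar *vals;

      MatGetOwnershipRange(A,&rstart,&rend);
      for (row=rstart; row<rend; row++) {
        MatGetRow(A,row,&ncols,&cols,&vals);
        /* examine ncols, cols[], and vals[] here; do not modify them */
        MatRestoreRow(A,row,&ncols,&cols,&vals);
      }
.ve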
528: Level: advanced
530: Concepts: matrices^row access
532: .seealso: MatRestoreRow(), MatSetValues(), MatGetValues(), MatCreateSubMatrices(), MatGetDiagonal()
533: @*/
534: PetscErrorCode MatGetRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
535: {
537: PetscInt incols;
542: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
543: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
544: if (!mat->ops->getrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
545: MatCheckPreallocated(mat,1);
546: PetscLogEventBegin(MAT_GetRow,mat,0,0,0);
547: (*mat->ops->getrow)(mat,row,&incols,(PetscInt**)cols,(PetscScalar**)vals);
548: if (ncols) *ncols = incols;
549: PetscLogEventEnd(MAT_GetRow,mat,0,0,0);
550: return(0);
551: }
553: /*@
554: MatConjugate - replaces the matrix values with their complex conjugates
556: Logically Collective on Mat
558: Input Parameters:
559: . mat - the matrix
561: Level: advanced
563: .seealso: VecConjugate()
564: @*/
565: PetscErrorCode MatConjugate(Mat mat)
566: {
567: #if defined(PETSC_USE_COMPLEX)
572: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
573: if (!mat->ops->conjugate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not provided for this matrix format, send email to petsc-maint@mcs.anl.gov");
574: (*mat->ops->conjugate)(mat);
575: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
576: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
577: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
578: }
579: #endif
580: return(0);
581: #else
582: return 0;
583: #endif
584: }
586: /*@C
587: MatRestoreRow - Frees any temporary space allocated by MatGetRow().
589: Not Collective
591: Input Parameters:
592: + mat - the matrix
593: . row - the row to get
594: . ncols, cols - the number of nonzeros and their columns
595: - vals - if not NULL, the values
597: Notes:
598: This routine should be called after you have finished examining the entries.
600: This routine zeros out ncols, cols, and vals. This is to prevent accidental
601: use of the array after it has been restored. If you pass NULL, it will
602: not zero the pointers. Use of cols or vals after MatRestoreRow is invalid.
604: Fortran Notes:
605: The calling sequence from Fortran is
606: .vb
607: MatRestoreRow(matrix,row,ncols,cols,values,ierr)
608: Mat matrix (input)
609: integer row (input)
610: integer ncols (output)
611: integer cols(maxcols) (output)
612: double precision (or double complex) values(maxcols) output
613: .ve
614: Where maxcols >= maximum nonzeros in any row of the matrix.
616: In Fortran MatRestoreRow() MUST be called after MatGetRow()
617: before another call to MatGetRow() can be made.
619: Level: advanced
621: .seealso: MatGetRow()
622: @*/
623: PetscErrorCode MatRestoreRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
624: {
630: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
631: if (!mat->ops->restorerow) return(0);
632: (*mat->ops->restorerow)(mat,row,ncols,(PetscInt **)cols,(PetscScalar **)vals);
633: if (ncols) *ncols = 0;
634: if (cols) *cols = NULL;
635: if (vals) *vals = NULL;
636: return(0);
637: }
639: /*@
640: MatGetRowUpperTriangular - Sets a flag to enable calls to MatGetRow() for matrices in MATSBAIJ format.
641: You should call MatRestoreRowUpperTriangular() after calling MatGetRow/MatRestoreRow() to disable the flag.
643: Not Collective
645: Input Parameter:
646: . mat - the matrix
648: Notes:
649: The flag is to ensure that users are aware that MatGetRow() only provides the upper triangular part of the row for matrices in MATSBAIJ format.
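   Example of Usage (an illustrative sketch; A is a placeholder for an assembled MATSBAIJ matrix, and row, ncols, cols, vals are as in MatGetRow()):
.vb
      MatGetRowUpperTriangular(A);
      MatGetRow(A,row,&ncols,&cols,&vals);
      /* ... examine the upper triangular part of the row ... */
      MatRestoreRow(A,row,&ncols,&cols,&vals);
      MatRestoreRowUpperTriangular(A);
.ve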
651: Level: advanced
653: Concepts: matrices^row access
655: .seealso: MatRestoreRowUpperTriangular()
656: @*/
657: PetscErrorCode MatGetRowUpperTriangular(Mat mat)
658: {
664: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
665: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
666: if (!mat->ops->getrowuppertriangular) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
667: MatCheckPreallocated(mat,1);
668: (*mat->ops->getrowuppertriangular)(mat);
669: return(0);
670: }
672: /*@
673: MatRestoreRowUpperTriangular - Disables calls to MatGetRow() for matrices in MATSBAIJ format.
675: Not Collective
677: Input Parameter:
678: . mat - the matrix
680: Notes:
681: This routine should be called after you have finished calling MatGetRow() and MatRestoreRow().
684: Level: advanced
686: .seealso: MatGetRowUpperTriangular()
687: @*/
688: PetscErrorCode MatRestoreRowUpperTriangular(Mat mat)
689: {
694: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
695: if (!mat->ops->restorerowuppertriangular) return(0);
696: (*mat->ops->restorerowuppertriangular)(mat);
697: return(0);
698: }
700: /*@C
701: MatSetOptionsPrefix - Sets the prefix used for searching for all
702: Mat options in the database.
704: Logically Collective on Mat
706: Input Parameters:
707: + A - the Mat context
708: - prefix - the prefix to prepend to all option names
710: Notes:
711: A hyphen (-) must NOT be given at the beginning of the prefix name.
712: The first character of all runtime options is AUTOMATICALLY the hyphen.
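   Example of Usage (an illustrative sketch; the prefix "sys1_" is an arbitrary placeholder):
.vb
      MatSetOptionsPrefix(A,"sys1_");
      MatSetFromOptions(A);    /* this matrix is now controlled by options such as -sys1_mat_type */
.ve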
714: Level: advanced
716: .keywords: Mat, set, options, prefix, database
718: .seealso: MatSetFromOptions()
719: @*/
720: PetscErrorCode MatSetOptionsPrefix(Mat A,const char prefix[])
721: {
726: PetscObjectSetOptionsPrefix((PetscObject)A,prefix);
727: return(0);
728: }
730: /*@C
731: MatAppendOptionsPrefix - Appends to the prefix used for searching for all
732: Mat options in the database.
734: Logically Collective on Mat
736: Input Parameters:
737: + A - the Mat context
738: - prefix - the prefix to prepend to all option names
740: Notes:
741: A hyphen (-) must NOT be given at the beginning of the prefix name.
742: The first character of all runtime options is AUTOMATICALLY the hyphen.
744: Level: advanced
746: .keywords: Mat, append, options, prefix, database
748: .seealso: MatGetOptionsPrefix()
749: @*/
750: PetscErrorCode MatAppendOptionsPrefix(Mat A,const char prefix[])
751: {
756: PetscObjectAppendOptionsPrefix((PetscObject)A,prefix);
757: return(0);
758: }
760: /*@C
761: MatGetOptionsPrefix - Gets the prefix used for searching for all
762: Mat options in the database.
764: Not Collective
766: Input Parameter:
767: . A - the Mat context
769: Output Parameter:
770: . prefix - pointer to the prefix string used
772: Notes: On the Fortran side, the user should pass in a string 'prefix' of
773: sufficient length to hold the prefix.
775: Level: advanced
777: .keywords: Mat, get, options, prefix, database
779: .seealso: MatAppendOptionsPrefix()
780: @*/
781: PetscErrorCode MatGetOptionsPrefix(Mat A,const char *prefix[])
782: {
787: PetscObjectGetOptionsPrefix((PetscObject)A,prefix);
788: return(0);
789: }
791: /*@
792: MatResetPreallocation - Resets the matrix to use the original nonzero pattern provided by the user.
794: Collective on Mat
796: Input Parameters:
797: . A - the Mat context
799: Notes:
800: The allocated memory will be shrunk after calling MatAssemblyBegin()/MatAssemblyEnd() with MAT_FINAL_ASSEMBLY. Users can reset the preallocation to access the original memory.
801: Currently supports MATSEQAIJ and MATMPIAIJ.
803: Level: beginner
805: .keywords: Mat, ResetPreallocation
807: .seealso: MatSeqAIJSetPreallocation(), MatMPIAIJSetPreallocation(), MatXAIJSetPreallocation()
808: @*/
809: PetscErrorCode MatResetPreallocation(Mat A)
810: {
816: PetscUseMethod(A,"MatResetPreallocation_C",(Mat),(A));
817: return(0);
818: }
821: /*@
822: MatSetUp - Sets up the internal matrix data structures for later use.
824: Collective on Mat
826: Input Parameters:
827: . A - the Mat context
829: Notes:
830: If the user has not set preallocation for this matrix then a default preallocation that is likely to be inefficient is used.
832: If a suitable preallocation routine is used, this function does not need to be called.
834: See the Performance chapter of the PETSc users manual for how to preallocate matrices
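   Example of Usage (an illustrative sketch; the global size 100 is an arbitrary placeholder):
.vb
      Mat A;
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
      MatSetFromOptions(A);
      MatSetUp(A);    /* uses a default (possibly inefficient) preallocation */
      /* ... MatSetValues(), MatAssemblyBegin()/MatAssemblyEnd() ... */
.ve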
836: Level: beginner
838: .keywords: Mat, setup
840: .seealso: MatCreate(), MatDestroy()
841: @*/
842: PetscErrorCode MatSetUp(Mat A)
843: {
844: PetscMPIInt size;
849: if (!((PetscObject)A)->type_name) {
850: MPI_Comm_size(PetscObjectComm((PetscObject)A), &size);
851: if (size == 1) {
852: MatSetType(A, MATSEQAIJ);
853: } else {
854: MatSetType(A, MATMPIAIJ);
855: }
856: }
857: if (!A->preallocated && A->ops->setup) {
858: PetscInfo(A,"Warning not preallocating matrix storage\n");
859: (*A->ops->setup)(A);
860: }
861: PetscLayoutSetUp(A->rmap);
862: PetscLayoutSetUp(A->cmap);
863: A->preallocated = PETSC_TRUE;
864: return(0);
865: }
867: #if defined(PETSC_HAVE_SAWS)
868: #include <petscviewersaws.h>
869: #endif
870: /*@C
871: MatView - Visualizes a matrix object.
873: Collective on Mat
875: Input Parameters:
876: + mat - the matrix
877: - viewer - visualization context
879: Notes:
880: The available visualization contexts include
881: + PETSC_VIEWER_STDOUT_SELF - for sequential matrices
882: . PETSC_VIEWER_STDOUT_WORLD - for parallel matrices created on PETSC_COMM_WORLD
883: . PETSC_VIEWER_STDOUT_(comm) - for matrices created on MPI communicator comm
884: - PETSC_VIEWER_DRAW_WORLD - graphical display of nonzero structure
886: The user can open alternative visualization contexts with
887: + PetscViewerASCIIOpen() - Outputs matrix to a specified file
888: . PetscViewerBinaryOpen() - Outputs matrix in binary to a
889: specified file; corresponding input uses MatLoad()
890: . PetscViewerDrawOpen() - Outputs nonzero matrix structure to
891: an X window display
892: - PetscViewerSocketOpen() - Outputs matrix to Socket viewer.
893: Currently only the sequential dense and AIJ
894: matrix types support the Socket viewer.
896: The user can call PetscViewerPushFormat() to specify the output
897: format of ASCII printed objects (when using PETSC_VIEWER_STDOUT_SELF,
898: PETSC_VIEWER_STDOUT_WORLD and PetscViewerASCIIOpen). Available formats include
899: + PETSC_VIEWER_DEFAULT - default, prints matrix contents
900: . PETSC_VIEWER_ASCII_MATLAB - prints matrix contents in Matlab format
901: . PETSC_VIEWER_ASCII_DENSE - prints entire matrix including zeros
902: . PETSC_VIEWER_ASCII_COMMON - prints matrix contents, using a sparse
903: format common among all matrix types
904: . PETSC_VIEWER_ASCII_IMPL - prints matrix contents, using an implementation-specific
905: format (which is in many cases the same as the default)
906: . PETSC_VIEWER_ASCII_INFO - prints basic information about the matrix
907: size and structure (not the matrix entries)
908: . PETSC_VIEWER_ASCII_INFO_DETAIL - prints more detailed information about
909: the matrix structure
911: Options Database Keys:
912: + -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
913: . -mat_view ::ascii_info_detail - Prints more detailed info
914: . -mat_view - Prints matrix in ASCII format
915: . -mat_view ::ascii_matlab - Prints matrix in Matlab format
916: . -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
917: . -display <name> - Sets display name (default is host)
918: . -draw_pause <sec> - Sets number of seconds to pause after display
919: . -mat_view socket - Sends matrix to socket, can be accessed from Matlab (see Users-Manual: Chapter 12 Using MATLAB with PETSc for details)
920: . -viewer_socket_machine <machine> -
921: . -viewer_socket_port <port> -
922: . -mat_view binary - save matrix to file in binary format
923: - -viewer_binary_filename <name> -
924: Level: beginner
926: Notes: see the manual page for MatLoad() for the exact format of the binary file when the binary
927: viewer is used.
929: See share/petsc/matlab/PetscBinaryRead.m for a Matlab code that can read in the binary file when the binary
930: viewer is used.
932: One can use '-mat_view draw -draw_pause -1' to pause the graphical display of matrix nonzero structure.
933: And then use the following mouse functions:
934: left mouse: zoom in
935: middle mouse: zoom out
936: right mouse: continue with the simulation
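   Example of Usage (an illustrative sketch; A is a placeholder for an assembled matrix):
.vb
      MatView(A,PETSC_VIEWER_STDOUT_WORLD);    /* default ASCII output of the entries */
      PetscViewerPushFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_INFO);
      MatView(A,PETSC_VIEWER_STDOUT_WORLD);    /* prints only size and structure information */
      PetscViewerPopFormat(PETSC_VIEWER_STDOUT_WORLD);
.ve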
938: Concepts: matrices^viewing
939: Concepts: matrices^plotting
940: Concepts: matrices^printing
942: .seealso: PetscViewerPushFormat(), PetscViewerASCIIOpen(), PetscViewerDrawOpen(),
943: PetscViewerSocketOpen(), PetscViewerBinaryOpen(), MatLoad()
944: @*/
945: PetscErrorCode MatView(Mat mat,PetscViewer viewer)
946: {
947: PetscErrorCode ierr;
948: PetscInt rows,cols,rbs,cbs;
949: PetscBool iascii,ibinary;
950: PetscViewerFormat format;
951: PetscMPIInt size;
952: #if defined(PETSC_HAVE_SAWS)
953: PetscBool issaws;
954: #endif
959: if (!viewer) {
960: PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)mat),&viewer);
961: }
964: MatCheckPreallocated(mat,1);
965: PetscViewerGetFormat(viewer,&format);
966: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
967: if (size == 1 && format == PETSC_VIEWER_LOAD_BALANCE) return(0);
968: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&ibinary);
969: if (ibinary) {
970: PetscBool mpiio;
971: PetscViewerBinaryGetUseMPIIO(viewer,&mpiio);
972: if (mpiio) SETERRQ(PetscObjectComm((PetscObject)viewer),PETSC_ERR_SUP,"PETSc matrix viewers do not support using MPI-IO, turn off that flag");
973: }
975: PetscLogEventBegin(MAT_View,mat,viewer,0,0);
976: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
977: if ((!iascii || (format != PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL)) && mat->factortype) {
978: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"No viewers for factored matrix except ASCII info or info_detailed");
979: }
981: #if defined(PETSC_HAVE_SAWS)
982: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSAWS,&issaws);
983: #endif
984: if (iascii) {
985: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
986: PetscObjectPrintClassNamePrefixType((PetscObject)mat,viewer);
987: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
988: PetscViewerASCIIPushTab(viewer);
989: MatGetSize(mat,&rows,&cols);
990: MatGetBlockSizes(mat,&rbs,&cbs);
991: if (rbs != 1 || cbs != 1) {
992: if (rbs != cbs) {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, rbs=%D, cbs = %D\n",rows,cols,rbs,cbs);}
993: else {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, bs=%D\n",rows,cols,rbs);}
994: } else {
995: PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D\n",rows,cols);
996: }
997: if (mat->factortype) {
998: MatSolverType solver;
999: MatFactorGetSolverType(mat,&solver);
1000: PetscViewerASCIIPrintf(viewer,"package used to perform factorization: %s\n",solver);
1001: }
1002: if (mat->ops->getinfo) {
1003: MatInfo info;
1004: MatGetInfo(mat,MAT_GLOBAL_SUM,&info);
1005: PetscViewerASCIIPrintf(viewer,"total: nonzeros=%.f, allocated nonzeros=%.f\n",info.nz_used,info.nz_allocated);
1006: PetscViewerASCIIPrintf(viewer,"total number of mallocs used during MatSetValues calls =%D\n",(PetscInt)info.mallocs);
1007: }
1008: if (mat->nullsp) {PetscViewerASCIIPrintf(viewer," has attached null space\n");}
1009: if (mat->nearnullsp) {PetscViewerASCIIPrintf(viewer," has attached near null space\n");}
1010: }
1011: #if defined(PETSC_HAVE_SAWS)
1012: } else if (issaws) {
1013: PetscMPIInt rank;
1015: PetscObjectName((PetscObject)mat);
1016: MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
1017: if (!((PetscObject)mat)->amsmem && !rank) {
1018: PetscObjectViewSAWs((PetscObject)mat,viewer);
1019: }
1020: #endif
1021: }
1022: if ((format == PETSC_VIEWER_NATIVE || format == PETSC_VIEWER_LOAD_BALANCE) && mat->ops->viewnative) {
1023: PetscViewerASCIIPushTab(viewer);
1024: (*mat->ops->viewnative)(mat,viewer);
1025: PetscViewerASCIIPopTab(viewer);
1026: } else if (mat->ops->view) {
1027: PetscViewerASCIIPushTab(viewer);
1028: (*mat->ops->view)(mat,viewer);
1029: PetscViewerASCIIPopTab(viewer);
1030: }
1031: if (iascii) {
1032: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
1033: PetscViewerGetFormat(viewer,&format);
1034: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1035: PetscViewerASCIIPopTab(viewer);
1036: }
1037: }
1038: PetscLogEventEnd(MAT_View,mat,viewer,0,0);
1039: return(0);
1040: }
1042: #if defined(PETSC_USE_DEBUG)
1043: #include <../src/sys/totalview/tv_data_display.h>
1044: PETSC_UNUSED static int TV_display_type(const struct _p_Mat *mat)
1045: {
1046: TV_add_row("Local rows", "int", &mat->rmap->n);
1047: TV_add_row("Local columns", "int", &mat->cmap->n);
1048: TV_add_row("Global rows", "int", &mat->rmap->N);
1049: TV_add_row("Global columns", "int", &mat->cmap->N);
1050: TV_add_row("Typename", TV_ascii_string_type, ((PetscObject)mat)->type_name);
1051: return TV_format_OK;
1052: }
1053: #endif
1055: /*@C
1056: MatLoad - Loads a matrix that has been stored in binary format
1057: with MatView(). The matrix format is determined from the options database.
1058: Generates a parallel MPI matrix if the communicator has more than one
1059: processor. The default matrix type is AIJ.
1061: Collective on PetscViewer
1063: Input Parameters:
1064: + newmat - the newly loaded matrix, this needs to have been created with MatCreate()
1065: or some related function before a call to MatLoad()
1066: - viewer - binary file viewer, created with PetscViewerBinaryOpen()
1068: Options Database Keys:
1069: . -matload_block_size <bs> - Used with block matrix formats (MATSEQBAIJ, ...)
1070:                              to specify the block size
1073: Level: beginner
1075: Notes:
1076: If the Mat type has not yet been given then MATAIJ is used, call MatSetFromOptions() on the
1077: Mat before calling this routine if you wish to set it from the options database.
1079: MatLoad() automatically loads into the options database any options
1080: given in the file filename.info where filename is the name of the file
1081: that was passed to the PetscViewerBinaryOpen(). The options in the info
1082: file will be ignored if you use the -viewer_binary_skip_info option.
1084: If the type or size of newmat is not set before a call to MatLoad, PETSc
1085: sets the default matrix type AIJ and sets the local and global sizes.
1086: If type and/or size is already set, then the same are used.
1088: In parallel, each processor can load a subset of rows (or the
1089: entire matrix). This routine is especially useful when a large
1090: matrix is stored on disk and only part of it is desired on each
1091: processor. For example, a parallel solver may access only some of
1092: the rows from each processor. The algorithm used here reads
1093: relatively small blocks of data rather than reading the entire
1094: matrix and then subsetting it.
1096: Notes for advanced users:
1097: Most users should not need to know the details of the binary storage
1098: format, since MatLoad() and MatView() completely hide these details.
1099: But for anyone who's interested, the standard binary matrix storage
1100: format is
1102: $ int MAT_FILE_CLASSID
1103: $ int number of rows
1104: $ int number of columns
1105: $ int total number of nonzeros
1106: $ int *number nonzeros in each row
1107: $ int *column indices of all nonzeros (starting index is zero)
1108: $ PetscScalar *values of all nonzeros
1110: PETSc automatically does the byte swapping for
1111: machines that store the bytes reversed, e.g. DEC alpha, freebsd,
1112: linux, Windows and the paragon; thus if you write your own binary
1113: read/write routines you have to swap the bytes; see PetscBinaryRead()
1114: and PetscBinaryWrite() to see how this may be done.
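   Example of Usage (an illustrative sketch; "matrix.dat" is a placeholder file name):
.vb
      Mat         A;
      PetscViewer viewer;

      PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetFromOptions(A);
      MatLoad(A,viewer);
      PetscViewerDestroy(&viewer);
.ve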
1116: .keywords: matrix, load, binary, input
1118: .seealso: PetscViewerBinaryOpen(), MatView(), VecLoad()
1120: @*/
1121: PetscErrorCode MatLoad(Mat newmat,PetscViewer viewer)
1122: {
1124: PetscBool isbinary,flg;
1129: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1130: if (!isbinary) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid viewer; open viewer with PetscViewerBinaryOpen()");
1132: if (!((PetscObject)newmat)->type_name) {
1133: MatSetType(newmat,MATAIJ);
1134: }
1136: if (!newmat->ops->load) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatLoad is not supported for type");
1137: PetscLogEventBegin(MAT_Load,viewer,0,0,0);
1138: (*newmat->ops->load)(newmat,viewer);
1139: PetscLogEventEnd(MAT_Load,viewer,0,0,0);
1141: flg = PETSC_FALSE;
1142: PetscOptionsGetBool(((PetscObject)newmat)->options,((PetscObject)newmat)->prefix,"-matload_symmetric",&flg,NULL);
1143: if (flg) {
1144: MatSetOption(newmat,MAT_SYMMETRIC,PETSC_TRUE);
1145: MatSetOption(newmat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
1146: }
1147: flg = PETSC_FALSE;
1148: PetscOptionsGetBool(((PetscObject)newmat)->options,((PetscObject)newmat)->prefix,"-matload_spd",&flg,NULL);
1149: if (flg) {
1150: MatSetOption(newmat,MAT_SPD,PETSC_TRUE);
1151: }
1152: return(0);
1153: }
1155: PetscErrorCode MatDestroy_Redundant(Mat_Redundant **redundant)
1156: {
1158: Mat_Redundant *redund = *redundant;
1159: PetscInt i;
1162: if (redund){
1163: if (redund->matseq) { /* via MatCreateSubMatrices() */
1164: ISDestroy(&redund->isrow);
1165: ISDestroy(&redund->iscol);
1166: MatDestroySubMatrices(1,&redund->matseq);
1167: } else {
1168: PetscFree2(redund->send_rank,redund->recv_rank);
1169: PetscFree(redund->sbuf_j);
1170: PetscFree(redund->sbuf_a);
1171: for (i=0; i<redund->nrecvs; i++) {
1172: PetscFree(redund->rbuf_j[i]);
1173: PetscFree(redund->rbuf_a[i]);
1174: }
1175: PetscFree4(redund->sbuf_nz,redund->rbuf_nz,redund->rbuf_j,redund->rbuf_a);
1176: }
1178: if (redund->subcomm) {
1179: PetscCommDestroy(&redund->subcomm);
1180: }
1181: PetscFree(redund);
1182: }
1183: return(0);
1184: }
1186: /*@
1187: MatDestroy - Frees space taken by a matrix.
1189: Collective on Mat
1191: Input Parameter:
1192: . A - the matrix
1194: Level: beginner
1196: @*/
1197: PetscErrorCode MatDestroy(Mat *A)
1198: {
1202: if (!*A) return(0);
1204: if (--((PetscObject)(*A))->refct > 0) {*A = NULL; return(0);}
1206: /* if memory was published with SAWs then destroy it */
1207: PetscObjectSAWsViewOff((PetscObject)*A);
1208: if ((*A)->ops->destroy) {
1209: (*(*A)->ops->destroy)(*A);
1210: }
1212: PetscFree((*A)->solvertype);
1213: MatDestroy_Redundant(&(*A)->redundant);
1214: MatNullSpaceDestroy(&(*A)->nullsp);
1215: MatNullSpaceDestroy(&(*A)->transnullsp);
1216: MatNullSpaceDestroy(&(*A)->nearnullsp);
1217: MatDestroy(&(*A)->schur);
1218: PetscLayoutDestroy(&(*A)->rmap);
1219: PetscLayoutDestroy(&(*A)->cmap);
1220: PetscHeaderDestroy(A);
1221: return(0);
1222: }
1224: /*@C
1225: MatSetValues - Inserts or adds a block of values into a matrix.
1226: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
1227: MUST be called after all calls to MatSetValues() have been completed.
1229: Not Collective
1231: Input Parameters:
1232: + mat - the matrix
1233: . v - a logically two-dimensional array of values
1234: . m, idxm - the number of rows and their global indices
1235: . n, idxn - the number of columns and their global indices
1236: - addv - either ADD_VALUES or INSERT_VALUES, where
1237: ADD_VALUES adds values to any existing entries, and
1238: INSERT_VALUES replaces existing entries with new values
1240: Notes:
1241: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1242: MatSetUp() before using this routine
1244: By default the values, v, are row-oriented. See MatSetOption() for other options.
1246: Calls to MatSetValues() with the INSERT_VALUES and ADD_VALUES
1247: options cannot be mixed without intervening calls to the assembly
1248: routines.
1250: MatSetValues() uses 0-based row and column numbers in Fortran
1251: as well as in C.
1253: Negative indices may be passed in idxm and idxn; these rows and columns are
1254: simply ignored. This allows easily inserting element stiffness matrices
1255: with homogeneous Dirichlet boundary conditions that you don't want represented
1256: in the matrix.
1258: Efficiency Alert:
1259: The routine MatSetValuesBlocked() may offer much better efficiency
1260: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
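   Example of Usage (an illustrative sketch; A is a placeholder for a preallocated matrix and the values are arbitrary):
.vb
      PetscInt    rows[2] = {0,1},cols[2] = {0,1};
      PetscScalar v[4]    = {1.0,2.0,3.0,4.0};    /* row-oriented: v[i*n+j] goes to entry (rows[i],cols[j]) */

      MatSetValues(A,2,rows,2,cols,v,INSERT_VALUES);
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve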
1262: Level: beginner
1264: Developer Notes: This is labeled with C so does not automatically generate Fortran stubs and interfaces
1265: because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.
1267: Concepts: matrices^putting entries in
1269: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1270: InsertMode, INSERT_VALUES, ADD_VALUES
1271: @*/
1272: PetscErrorCode MatSetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1273: {
1275: #if defined(PETSC_USE_DEBUG)
1276: PetscInt i,j;
1277: #endif
1282: if (!m || !n) return(0); /* no values to insert */
1286: MatCheckPreallocated(mat,1);
1287: if (mat->insertmode == NOT_SET_VALUES) {
1288: mat->insertmode = addv;
1289: }
1290: #if defined(PETSC_USE_DEBUG)
1291: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1292: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1293: if (!mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1295: for (i=0; i<m; i++) {
1296: for (j=0; j<n; j++) {
1297: if (mat->erroriffailure && PetscIsInfOrNanScalar(v[i*n+j]))
1298: #if defined(PETSC_USE_COMPLEX)
1299: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g+ig at matrix entry (%D,%D)",(double)PetscRealPart(v[i*n+j]),(double)PetscImaginaryPart(v[i*n+j]),idxm[i],idxn[j]);
1300: #else
1301: SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g at matrix entry (%D,%D)",(double)v[i*n+j],idxm[i],idxn[j]);
1302: #endif
1303: }
1304: }
1305: #endif
1307: if (mat->assembled) {
1308: mat->was_assembled = PETSC_TRUE;
1309: mat->assembled = PETSC_FALSE;
1310: }
1311: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1312: (*mat->ops->setvalues)(mat,m,idxm,n,idxn,v,addv);
1313: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1314: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1315: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1316: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1317: }
1318: #endif
1319: return(0);
1320: }
1323: /*@
1324: MatSetValuesRowLocal - Inserts a row (block row for BAIJ matrices) of nonzero
1325: values into a matrix
1327: Not Collective
1329: Input Parameters:
1330: + mat - the matrix
1331: . row - the (block) row to set
1332: - v - a logically two-dimensional array of values
1334: Notes:
1335: The values, v, are column-oriented (for the block version) and sorted
1337: All the nonzeros in the row must be provided
1339: The matrix must have previously had its column indices set
1341: The row must belong to this process
1343: Level: intermediate
1345: Concepts: matrices^putting entries in
1347: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1348: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues(), MatSetValuesRow(), MatSetLocalToGlobalMapping()
1349: @*/
1350: PetscErrorCode MatSetValuesRowLocal(Mat mat,PetscInt row,const PetscScalar v[])
1351: {
1353: PetscInt globalrow;
1359: ISLocalToGlobalMappingApply(mat->rmap->mapping,1,&row,&globalrow);
1360: MatSetValuesRow(mat,globalrow,v);
1361: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1362: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1363: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1364: }
1365: #endif
1366: return(0);
1367: }
1369: /*@
1370: MatSetValuesRow - Inserts a row (block row for BAIJ matrices) of nonzero
1371: values into a matrix
1373: Not Collective
1375: Input Parameters:
1376: + mat - the matrix
1377: . row - the (block) row to set
1378: - v - a logically two-dimensional (column major) array of values for block matrices with blocksize larger than one, otherwise a one dimensional array of values
1380: Notes:
1381: The values, v, are column-oriented for the block version.
1383: All the nonzeros in the row must be provided
1385: THE MATRIX MUST HAVE PREVIOUSLY HAD ITS COLUMN INDICES SET. IT IS RARE THAT THIS ROUTINE IS USED, usually MatSetValues() is used.
1387: The row must belong to this process
1389: Level: advanced
1391: Concepts: matrices^putting entries in
1393: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1394: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1395: @*/
1396: PetscErrorCode MatSetValuesRow(Mat mat,PetscInt row,const PetscScalar v[])
1397: {
1403: MatCheckPreallocated(mat,1);
1405: #if defined(PETSC_USE_DEBUG)
1406: if (mat->insertmode == ADD_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add and insert values");
1407: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1408: #endif
1409: mat->insertmode = INSERT_VALUES;
1411: if (mat->assembled) {
1412: mat->was_assembled = PETSC_TRUE;
1413: mat->assembled = PETSC_FALSE;
1414: }
1415: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1416: if (!mat->ops->setvaluesrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1417: (*mat->ops->setvaluesrow)(mat,row,v);
1418: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1419: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1420: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1421: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1422: }
1423: #endif
1424: return(0);
1425: }
1427: /*@
1428: MatSetValuesStencil - Inserts or adds a block of values into a matrix.
1429: Using structured grid indexing
1431: Not Collective
1433: Input Parameters:
1434: + mat - the matrix
1435: . m - number of rows being entered
1436: . idxm - grid coordinates (and component number when dof > 1) for matrix rows being entered
1437: . n - number of columns being entered
1438: . idxn - grid coordinates (and component number when dof > 1) for matrix columns being entered
1439: . v - a logically two-dimensional array of values
1440: - addv - either ADD_VALUES or INSERT_VALUES, where
1441: ADD_VALUES adds values to any existing entries, and
1442: INSERT_VALUES replaces existing entries with new values
1444: Notes:
1445: By default the values, v, are row-oriented. See MatSetOption() for other options.
1447: Calls to MatSetValuesStencil() with the INSERT_VALUES and ADD_VALUES
1448: options cannot be mixed without intervening calls to the assembly
1449: routines.
1451: The grid coordinates are across the entire grid, not just the local portion
1453: MatSetValuesStencil() uses 0-based row and column numbers in Fortran
1454: as well as in C.
1456: For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine
1458: In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1459: or call MatSetLocalToGlobalMapping() and MatSetStencil() first.
1461: The columns and rows in the stencil passed in MUST be contained within the
1462: ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1463: if you create a DMDA with an overlap of one grid level and on a particular process its first
1464: local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1465: first i index you can use in your column and row indices in MatSetStencil() is 5.
1467: In Fortran idxm and idxn should be declared as
1468: $ MatStencil idxm(4,m),idxn(4,n)
1469: and the values inserted using
1470: $ idxm(MatStencil_i,1) = i
1471: $ idxm(MatStencil_j,1) = j
1472: $ idxm(MatStencil_k,1) = k
1473: $ idxm(MatStencil_c,1) = c
1474: etc
1476: For periodic boundary conditions use negative indices for values to the left (below 0), which are
1477: obtained by wrapping values from the right edge. For values to the right of the last entry use that index plus one,
1478: etc., to obtain values obtained by wrapping from the left edge. This does not work for anything but the
1479: DM_BOUNDARY_PERIODIC boundary type.
1481: For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
1482: a single value per point) you can skip filling those indices.
1484: Inspired by the structured grid interface to the HYPRE package
1485: (http://www.llnl.gov/CASC/hypre)
1487: Efficiency Alert:
1488: The routine MatSetValuesBlockedStencil() may offer much better efficiency
1489: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
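   Example of Usage (an illustrative sketch; A is assumed to come from DMCreateMatrix() on a 2d DMDA with one degree of freedom, i and j are placeholders for grid indices in the local ghosted range, and the 5-point values are arbitrary):
.vb
      MatStencil  row,col[5];
      PetscScalar v[5] = {-1.0,-1.0,4.0,-1.0,-1.0};

      row.i = i;      row.j = j;      /* unused k and c indices may be left unset, as noted above */
      col[0].i = i;   col[0].j = j-1;
      col[1].i = i-1; col[1].j = j;
      col[2].i = i;   col[2].j = j;
      col[3].i = i+1; col[3].j = j;
      col[4].i = i;   col[4].j = j+1;
      MatSetValuesStencil(A,1,&row,5,col,v,INSERT_VALUES);
.ve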
1491: Level: beginner
1493: Concepts: matrices^putting entries in
1495: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1496: MatSetValues(), MatSetValuesBlockedStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil
1497: @*/
1498: PetscErrorCode MatSetValuesStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1499: {
1501: PetscInt buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1502: PetscInt j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1503: PetscInt *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);
1506: if (!m || !n) return(0); /* no values to insert */
1513: if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1514: jdxm = buf; jdxn = buf+m;
1515: } else {
1516: PetscMalloc2(m,&bufm,n,&bufn);
1517: jdxm = bufm; jdxn = bufn;
1518: }
1519: for (i=0; i<m; i++) {
1520: for (j=0; j<3-sdim; j++) dxm++;
1521: tmp = *dxm++ - starts[0];
1522: for (j=0; j<dim-1; j++) {
1523: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1524: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1525: }
1526: if (mat->stencil.noc) dxm++;
1527: jdxm[i] = tmp;
1528: }
1529: for (i=0; i<n; i++) {
1530: for (j=0; j<3-sdim; j++) dxn++;
1531: tmp = *dxn++ - starts[0];
1532: for (j=0; j<dim-1; j++) {
1533: if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1534: else tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1535: }
1536: if (mat->stencil.noc) dxn++;
1537: jdxn[i] = tmp;
1538: }
1539: MatSetValuesLocal(mat,m,jdxm,n,jdxn,v,addv);
1540: PetscFree2(bufm,bufn);
1541: return(0);
1542: }
1544: /*@
1545: MatSetValuesBlockedStencil - Inserts or adds a block of values into a matrix.
1546: Using structured grid indexing
1548: Not Collective
1550: Input Parameters:
1551: + mat - the matrix
1552: . m - number of rows being entered
1553: . idxm - grid coordinates for matrix rows being entered
1554: . n - number of columns being entered
1555: . idxn - grid coordinates for matrix columns being entered
1556: . v - a logically two-dimensional array of values
1557: - addv - either ADD_VALUES or INSERT_VALUES, where
1558: ADD_VALUES adds values to any existing entries, and
1559: INSERT_VALUES replaces existing entries with new values
1561: Notes:
1562: By default the values, v, are row-oriented and unsorted.
1563: See MatSetOption() for other options.
1565: Calls to MatSetValuesBlockedStencil() with the INSERT_VALUES and ADD_VALUES
1566: options cannot be mixed without intervening calls to the assembly
1567: routines.
1569: The grid coordinates are across the entire grid, not just the local portion
1571: MatSetValuesBlockedStencil() uses 0-based row and column numbers in Fortran
1572: as well as in C.
1574: For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine
1576: In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1577: or call MatSetBlockSize(), MatSetLocalToGlobalMapping() and MatSetStencil() first.
1579: The columns and rows in the stencil passed in MUST be contained within the
1580: ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1581: if you create a DMDA with an overlap of one grid level and on a particular process its first
1582: local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1583: first i index you can use in your column and row indices in MatSetStencil() is 5.
1585: In Fortran idxm and idxn should be declared as
1586: $ MatStencil idxm(4,m),idxn(4,n)
1587: and the values inserted using
1588: $ idxm(MatStencil_i,1) = i
1589: $ idxm(MatStencil_j,1) = j
1590: $ idxm(MatStencil_k,1) = k
1591: etc
1593: Negative indices may be passed in idxm and idxn; these rows and columns are
1594: simply ignored. This allows easily inserting element stiffness matrices
1595: with homogeneous Dirichlet boundary conditions that you don't want represented
1596: in the matrix.
1598: Inspired by the structured grid interface to the HYPRE package
1599: (http://www.llnl.gov/CASC/hypre)
1601: Level: beginner
1603: Concepts: matrices^putting entries in
1605: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1606: MatSetValues(), MatSetValuesStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil,
1607: MatSetBlockSize(), MatSetLocalToGlobalMapping()
1608: @*/
1609: PetscErrorCode MatSetValuesBlockedStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1610: {
1612: PetscInt buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1613: PetscInt j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1614: PetscInt *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);
1617: if (!m || !n) return(0); /* no values to insert */
1624: if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1625: jdxm = buf; jdxn = buf+m;
1626: } else {
1627: PetscMalloc2(m,&bufm,n,&bufn);
1628: jdxm = bufm; jdxn = bufn;
1629: }
1630: for (i=0; i<m; i++) {
1631: for (j=0; j<3-sdim; j++) dxm++;
1632: tmp = *dxm++ - starts[0];
1633: for (j=0; j<sdim-1; j++) {
1634: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1635: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1636: }
1637: dxm++;
1638: jdxm[i] = tmp;
1639: }
1640: for (i=0; i<n; i++) {
1641: for (j=0; j<3-sdim; j++) dxn++;
1642: tmp = *dxn++ - starts[0];
1643: for (j=0; j<sdim-1; j++) {
1644: if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1645: else tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1646: }
1647: dxn++;
1648: jdxn[i] = tmp;
1649: }
1650: MatSetValuesBlockedLocal(mat,m,jdxm,n,jdxn,v,addv);
1651: PetscFree2(bufm,bufn);
1652: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1653: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1654: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1655: }
1656: #endif
1657: return(0);
1658: }
1660: /*@
1661: MatSetStencil - Sets the grid information for setting values into a matrix via
1662: MatSetValuesStencil()
1664: Not Collective
1666: Input Parameters:
1667: + mat - the matrix
1668: . dim - dimension of the grid 1, 2, or 3
1669: . dims - number of grid points in x, y, and z direction, including ghost points on your processor
1670: . starts - starting point of ghost nodes on your processor in x, y, and z direction
1671: - dof - number of degrees of freedom per node
1674: Inspired by the structured grid interface to the HYPRE package
1675: (http://www.llnl.gov/CASC/hypre)
1677: For matrices generated with DMCreateMatrix() this routine is automatically called and so not needed by the
1678: user.
1680: Level: beginner
1682: Concepts: matrices^putting entries in
1684: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1685: MatSetValues(), MatSetValuesBlockedStencil(), MatSetValuesStencil()
1686: @*/
1687: PetscErrorCode MatSetStencil(Mat mat,PetscInt dim,const PetscInt dims[],const PetscInt starts[],PetscInt dof)
1688: {
1689: PetscInt i;
1696: mat->stencil.dim = dim + (dof > 1);
1697: for (i=0; i<dim; i++) {
1698: mat->stencil.dims[i] = dims[dim-i-1]; /* copy the values in backwards */
1699: mat->stencil.starts[i] = starts[dim-i-1];
1700: }
1701: mat->stencil.dims[dim] = dof;
1702: mat->stencil.starts[dim] = 0;
1703: mat->stencil.noc = (PetscBool)(dof == 1);
1704: return(0);
1705: }
1707: /*@C
1708: MatSetValuesBlocked - Inserts or adds a block of values into a matrix.
1710: Not Collective
1712: Input Parameters:
1713: + mat - the matrix
1714: . v - a logically two-dimensional array of values
1715: . m, idxm - the number of block rows and their global block indices
1716: . n, idxn - the number of block columns and their global block indices
1717: - addv - either ADD_VALUES or INSERT_VALUES, where
1718: ADD_VALUES adds values to any existing entries, and
1719: INSERT_VALUES replaces existing entries with new values
1721: Notes:
1722: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call
1723: MatXXXXSetPreallocation() or MatSetUp() before using this routine.
1725: The m and n count the NUMBER of blocks in the row direction and column direction,
1726: NOT the total number of rows/columns; for example, if the block size is 2 and
1727: you are passing in values for rows 2,3,4,5 then m would be 2 (not 4).
1728: The values in idxm would be 1 and 2; that is, the first row index of each block divided by
1729: the block size.
1731: Note that you must call MatSetBlockSize() when constructing this matrix (before
1732: preallocating it).
1734: By default the values, v, are row-oriented, so the layout of
1735: v is the same as for MatSetValues(). See MatSetOption() for other options.
1737: Calls to MatSetValuesBlocked() with the INSERT_VALUES and ADD_VALUES
1738: options cannot be mixed without intervening calls to the assembly
1739: routines.
1741: MatSetValuesBlocked() uses 0-based row and column numbers in Fortran
1742: as well as in C.
1744: Negative indices may be passed in idxm and idxn; these rows and columns are
1745: simply ignored. This allows easy insertion of element stiffness matrices
1746: with homogeneous Dirichlet boundary conditions that you do not want represented
1747: in the matrix.
1749: Each time an entry is set within a sparse matrix via MatSetValues(),
1750: internal searching must be done to determine where to place the
1751: data in the matrix storage space. By instead inserting blocks of
1752: entries via MatSetValuesBlocked(), the overhead of matrix assembly is
1753: reduced.
1755: Example:
1756: $ Suppose m=n=2 and block size (bs) = 2. The array is
1757: $
1758: $ 1 2 | 3 4
1759: $ 5 6 | 7 8
1760: $ - - - | - - -
1761: $ 9 10 | 11 12
1762: $ 13 14 | 15 16
1763: $
1764: $ v[] should be passed in like
1765: $ v[] = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
1766: $
1767: $ If you are not using row oriented storage of v (that is you called MatSetOption(mat,MAT_ROW_ORIENTED,PETSC_FALSE)) then
1768: $ v[] = [1,5,9,13,2,6,10,14,3,7,11,15,4,8,12,16]
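   The example above written as C calls (a minimal sketch; the matrix A is assumed to have been
   preallocated with block size 2, and error checking is omitted):
.vb
      Mat         A;
      PetscInt    idxm[2] = {1,2},idxn[2] = {1,2};   /* block rows/columns 1 and 2, i.e. rows/columns 2..5 */
      PetscScalar v[16]   = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};

      MatSetValuesBlocked(A,2,idxm,2,idxn,v,INSERT_VALUES);
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve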
1770: Level: intermediate
1772: Concepts: matrices^putting entries in blocked
1774: .seealso: MatSetBlockSize(), MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesBlockedLocal()
1775: @*/
1776: PetscErrorCode MatSetValuesBlocked(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1777: {
1783: if (!m || !n) return(0); /* no values to insert */
1787: MatCheckPreallocated(mat,1);
1788: if (mat->insertmode == NOT_SET_VALUES) {
1789: mat->insertmode = addv;
1790: }
1791: #if defined(PETSC_USE_DEBUG)
1792: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1793: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1794: if (!mat->ops->setvaluesblocked && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1795: #endif
1797: if (mat->assembled) {
1798: mat->was_assembled = PETSC_TRUE;
1799: mat->assembled = PETSC_FALSE;
1800: }
1801: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1802: if (mat->ops->setvaluesblocked) {
1803: (*mat->ops->setvaluesblocked)(mat,m,idxm,n,idxn,v,addv);
1804: } else {
1805: PetscInt buf[8192],*bufr=0,*bufc=0,*iidxm,*iidxn;
1806: PetscInt i,j,bs,cbs;
1807: MatGetBlockSizes(mat,&bs,&cbs);
1808: if (m*bs+n*cbs <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1809: iidxm = buf; iidxn = buf + m*bs;
1810: } else {
1811: PetscMalloc2(m*bs,&bufr,n*cbs,&bufc);
1812: iidxm = bufr; iidxn = bufc;
1813: }
1814: for (i=0; i<m; i++) {
1815: for (j=0; j<bs; j++) {
1816: iidxm[i*bs+j] = bs*idxm[i] + j;
1817: }
1818: }
1819: for (i=0; i<n; i++) {
1820: for (j=0; j<cbs; j++) {
1821: iidxn[i*cbs+j] = cbs*idxn[i] + j;
1822: }
1823: }
1824: MatSetValues(mat,m*bs,iidxm,n*cbs,iidxn,v,addv);
1825: PetscFree2(bufr,bufc);
1826: }
1827: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1828: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1829: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1830: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1831: }
1832: #endif
1833: return(0);
1834: }
1836: /*@
1837: MatGetValues - Gets a block of values from a matrix.
1839: Not Collective; currently only returns a local block
1841: Input Parameters:
1842: + mat - the matrix
1843: . v - a logically two-dimensional array for storing the values
1844: . m, idxm - the number of rows and their global indices
1845: - n, idxn - the number of columns and their global indices
1847: Notes:
1848: The user must allocate space (m*n PetscScalars) for the values, v.
1849: The values, v, are then returned in a row-oriented format,
1850: analogous to that used by default in MatSetValues().
1852: MatGetValues() uses 0-based row and column numbers in
1853: Fortran as well as in C.
1855: MatGetValues() requires that the matrix has been assembled
1856: with MatAssemblyBegin()/MatAssemblyEnd(). Thus, calls to
1857: MatSetValues() and MatGetValues() CANNOT be made in succession
1858: without intermediate matrix assembly.
1860: Negative row or column indices will be ignored and those locations in v[] will be
1861: left unchanged.
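   Example of Usage (a minimal sketch; the requested rows must be owned by this process, A is
   assumed assembled, and error checking is omitted):
.vb
      Mat         A;
      PetscInt    rows[2] = {0,1},cols[2] = {2,3};
      PetscScalar v[4];                 /* room for m*n values, row oriented */

      MatGetValues(A,2,rows,2,cols,v);
      /* v[0] = A(0,2), v[1] = A(0,3), v[2] = A(1,2), v[3] = A(1,3) */
.ve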
1863: Level: advanced
1865: Concepts: matrices^accessing values
1867: .seealso: MatGetRow(), MatCreateSubMatrices(), MatSetValues()
1868: @*/
1869: PetscErrorCode MatGetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
1870: {
1876: if (!m || !n) return(0);
1880: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
1881: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1882: if (!mat->ops->getvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1883: MatCheckPreallocated(mat,1);
1885: PetscLogEventBegin(MAT_GetValues,mat,0,0,0);
1886: (*mat->ops->getvalues)(mat,m,idxm,n,idxn,v);
1887: PetscLogEventEnd(MAT_GetValues,mat,0,0,0);
1888: return(0);
1889: }
1891: /*@
1892: MatSetValuesBatch - Adds (ADD_VALUES) many blocks of values into a matrix at once. The blocks must all be square and
1893: the same size. Currently, this can only be called once and creates the given matrix.
1895: Not Collective
1897: Input Parameters:
1898: + mat - the matrix
1899: . nb - the number of blocks
1900: . bs - the number of rows (and columns) in each block
1901: . rows - a concatenation of the rows for each block
1902: - v - a concatenation of logically two-dimensional arrays of values
1904: Notes:
1905: In the future, we will extend this routine to handle rectangular blocks, and to allow multiple calls for a given matrix.
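   Example of Usage (a minimal sketch adding two 2x2 element blocks; A is assumed to have been
   created with appropriate sizes, and error checking is omitted):
.vb
      Mat         A;
      PetscInt    rows[4] = {0,1, 2,3};           /* rows of block 0, then rows of block 1 */
      PetscScalar v[8]    = {1,2,3,4, 5,6,7,8};   /* the two 2x2 blocks, row oriented      */

      MatSetValuesBatch(A,2,2,rows,v);
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve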
1907: Level: advanced
1909: Concepts: matrices^putting entries in
1911: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1912: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1913: @*/
1914: PetscErrorCode MatSetValuesBatch(Mat mat, PetscInt nb, PetscInt bs, PetscInt rows[], const PetscScalar v[])
1915: {
1923: #if defined(PETSC_USE_DEBUG)
1924: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1925: #endif
1927: PetscLogEventBegin(MAT_SetValuesBatch,mat,0,0,0);
1928: if (mat->ops->setvaluesbatch) {
1929: (*mat->ops->setvaluesbatch)(mat,nb,bs,rows,v);
1930: } else {
1931: PetscInt b;
1932: for (b = 0; b < nb; ++b) {
1933: MatSetValues(mat, bs, &rows[b*bs], bs, &rows[b*bs], &v[b*bs*bs], ADD_VALUES);
1934: }
1935: }
1936: PetscLogEventEnd(MAT_SetValuesBatch,mat,0,0,0);
1937: return(0);
1938: }
1940: /*@
1941: MatSetLocalToGlobalMapping - Sets a local-to-global numbering for use by
1942: the routine MatSetValuesLocal() to allow users to insert matrix entries
1943: using a local (per-processor) numbering.
1945: Not Collective
1947: Input Parameters:
1948: + x - the matrix
1949: . rmapping - row mapping created with ISLocalToGlobalMappingCreate() or ISLocalToGlobalMappingCreateIS()
1950: - cmapping - column mapping
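   Example of Usage (a minimal sketch using the same mapping for rows and columns; the global
   indices shown are hypothetical, and error checking is omitted):
.vb
      Mat                    A;
      ISLocalToGlobalMapping map;
      PetscInt               globals[4] = {10,11,12,13};   /* global index of each local node */

      ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,1,4,globals,PETSC_COPY_VALUES,&map);
      MatSetLocalToGlobalMapping(A,map,map);
      ISLocalToGlobalMappingDestroy(&map);
      /* MatSetValuesLocal() may now be called with local indices 0..3 */
.ve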
1952: Level: intermediate
1954: Concepts: matrices^local to global mapping
1955: Concepts: local to global mapping^for matrices
1957: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesLocal()
1958: @*/
1959: PetscErrorCode MatSetLocalToGlobalMapping(Mat x,ISLocalToGlobalMapping rmapping,ISLocalToGlobalMapping cmapping)
1960: {
1969: if (x->ops->setlocaltoglobalmapping) {
1970: (*x->ops->setlocaltoglobalmapping)(x,rmapping,cmapping);
1971: } else {
1972: PetscLayoutSetISLocalToGlobalMapping(x->rmap,rmapping);
1973: PetscLayoutSetISLocalToGlobalMapping(x->cmap,cmapping);
1974: }
1975: return(0);
1976: }
1979: /*@
1980: MatGetLocalToGlobalMapping - Gets the local-to-global numbering set by MatSetLocalToGlobalMapping()
1982: Not Collective
1984: Input Parameters:
1985: . A - the matrix
1987: Output Parameters:
1988: + rmapping - row mapping
1989: - cmapping - column mapping
1991: Level: advanced
1993: Concepts: matrices^local to global mapping
1994: Concepts: local to global mapping^for matrices
1996: .seealso: MatSetValuesLocal()
1997: @*/
1998: PetscErrorCode MatGetLocalToGlobalMapping(Mat A,ISLocalToGlobalMapping *rmapping,ISLocalToGlobalMapping *cmapping)
1999: {
2005: if (rmapping) *rmapping = A->rmap->mapping;
2006: if (cmapping) *cmapping = A->cmap->mapping;
2007: return(0);
2008: }
2010: /*@
2011: MatGetLayouts - Gets the PetscLayout objects for rows and columns
2013: Not Collective
2015: Input Parameters:
2016: . A - the matrix
2018: Output Parameters:
2019: + rmap - row layout
2020: - cmap - column layout
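   Example of Usage (a minimal sketch; error checking is omitted):
.vb
      Mat         A;
      PetscLayout rmap;
      PetscInt    rstart,rend;

      MatGetLayouts(A,&rmap,NULL);
      PetscLayoutGetRange(rmap,&rstart,&rend);   /* ownership range of the rows */
.ve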
2022: Level: advanced
2024: .seealso: MatCreateVecs(), MatGetLocalToGlobalMapping()
2025: @*/
2026: PetscErrorCode MatGetLayouts(Mat A,PetscLayout *rmap,PetscLayout *cmap)
2027: {
2033: if (rmap) *rmap = A->rmap;
2034: if (cmap) *cmap = A->cmap;
2035: return(0);
2036: }
2038: /*@C
2039: MatSetValuesLocal - Inserts or adds values into certain locations of a matrix,
2040: using a local ordering of the nodes.
2042: Not Collective
2044: Input Parameters:
2045: + mat - the matrix
2046: . nrow, irow - number of rows and their local indices
2047: . ncol, icol - number of columns and their local indices
2048: . y - a logically two-dimensional array of values
2049: - addv - either INSERT_VALUES or ADD_VALUES, where
2050: ADD_VALUES adds values to any existing entries, and
2051: INSERT_VALUES replaces existing entries with new values
2053: Notes:
2054: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2055: MatSetUp() before using this routine
2057: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetLocalToGlobalMapping() before using this routine
2059: Calls to MatSetValuesLocal() with the INSERT_VALUES and ADD_VALUES
2060: options cannot be mixed without intervening calls to the assembly
2061: routines.
2063: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2064: MUST be called after all calls to MatSetValuesLocal() have been completed.
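   Example of Usage (a minimal sketch; A is assumed preallocated with MatSetLocalToGlobalMapping()
   already called on it, and error checking is omitted):
.vb
      Mat         A;
      PetscInt    irow[1] = {0},icol[2] = {0,1};   /* local indices */
      PetscScalar v[2]    = {4.0,-1.0};

      MatSetValuesLocal(A,1,irow,2,icol,v,INSERT_VALUES);
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve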
2066: Level: intermediate
2068: Concepts: matrices^putting entries in with local numbering
2070: Developer Notes: This is labeled with C so does not automatically generate Fortran stubs and interfaces
2071: because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.
2073: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetLocalToGlobalMapping(),
2074: MatSetValueLocal()
2075: @*/
2076: PetscErrorCode MatSetValuesLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2077: {
2083: MatCheckPreallocated(mat,1);
2084: if (!nrow || !ncol) return(0); /* no values to insert */
2088: if (mat->insertmode == NOT_SET_VALUES) {
2089: mat->insertmode = addv;
2090: }
2091: #if defined(PETSC_USE_DEBUG)
2092: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2093: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2094: if (!mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2095: #endif
2097: if (mat->assembled) {
2098: mat->was_assembled = PETSC_TRUE;
2099: mat->assembled = PETSC_FALSE;
2100: }
2101: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2102: if (mat->ops->setvalueslocal) {
2103: (*mat->ops->setvalueslocal)(mat,nrow,irow,ncol,icol,y,addv);
2104: } else {
2105: PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2106: if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2107: irowm = buf; icolm = buf+nrow;
2108: } else {
2109: PetscMalloc2(nrow,&bufr,ncol,&bufc);
2110: irowm = bufr; icolm = bufc;
2111: }
2112: ISLocalToGlobalMappingApply(mat->rmap->mapping,nrow,irow,irowm);
2113: ISLocalToGlobalMappingApply(mat->cmap->mapping,ncol,icol,icolm);
2114: MatSetValues(mat,nrow,irowm,ncol,icolm,y,addv);
2115: PetscFree2(bufr,bufc);
2116: }
2117: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2118: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
2119: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
2120: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
2121: }
2122: #endif
2123: return(0);
2124: }
2126: /*@C
2127: MatSetValuesBlockedLocal - Inserts or adds values into certain locations of a matrix,
2128: using a local ordering of the nodes a block at a time.
2130: Not Collective
2132: Input Parameters:
2133: + x - the matrix
2134: . nrow, irow - number of rows and their local indices
2135: . ncol, icol - number of columns and their local indices
2136: . y - a logically two-dimensional array of values
2137: - addv - either INSERT_VALUES or ADD_VALUES, where
2138: ADD_VALUES adds values to any existing entries, and
2139: INSERT_VALUES replaces existing entries with new values
2141: Notes:
2142: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2143: MatSetUp() before using this routine
2145: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetBlockSize() and MatSetLocalToGlobalMapping()
2146: before using this routine.
2148: Calls to MatSetValuesBlockedLocal() with the INSERT_VALUES and ADD_VALUES
2149: options cannot be mixed without intervening calls to the assembly
2150: routines.
2152: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2153: MUST be called after all calls to MatSetValuesBlockedLocal() have been completed.
2155: Level: intermediate
2157: Developer Notes: This is labeled with C so does not automatically generate Fortran stubs and interfaces
2158: because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.
2160: Concepts: matrices^putting blocked values in with local numbering
2162: .seealso: MatSetBlockSize(), MatSetLocalToGlobalMapping(), MatAssemblyBegin(), MatAssemblyEnd(),
2163: MatSetValuesLocal(), MatSetValuesBlocked()
2164: @*/
2165: PetscErrorCode MatSetValuesBlockedLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2166: {
2172: MatCheckPreallocated(mat,1);
2173: if (!nrow || !ncol) return(0); /* no values to insert */
2177: if (mat->insertmode == NOT_SET_VALUES) {
2178: mat->insertmode = addv;
2179: }
2180: #if defined(PETSC_USE_DEBUG)
2181: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2182: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2183: if (!mat->ops->setvaluesblockedlocal && !mat->ops->setvaluesblocked && !mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2184: #endif
2186: if (mat->assembled) {
2187: mat->was_assembled = PETSC_TRUE;
2188: mat->assembled = PETSC_FALSE;
2189: }
2190: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2191: if (mat->ops->setvaluesblockedlocal) {
2192: (*mat->ops->setvaluesblockedlocal)(mat,nrow,irow,ncol,icol,y,addv);
2193: } else {
2194: PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2195: if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2196: irowm = buf; icolm = buf + nrow;
2197: } else {
2198: PetscMalloc2(nrow,&bufr,ncol,&bufc);
2199: irowm = bufr; icolm = bufc;
2200: }
2201: ISLocalToGlobalMappingApplyBlock(mat->rmap->mapping,nrow,irow,irowm);
2202: ISLocalToGlobalMappingApplyBlock(mat->cmap->mapping,ncol,icol,icolm);
2203: MatSetValuesBlocked(mat,nrow,irowm,ncol,icolm,y,addv);
2204: PetscFree2(bufr,bufc);
2205: }
2206: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2207: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
2208: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
2209: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
2210: }
2211: #endif
2212: return(0);
2213: }
2215: /*@
2216: MatMultDiagonalBlock - Computes the matrix-vector product, y = Dx. Where D is defined by the inode or block structure of the diagonal
2218: Collective on Mat and Vec
2220: Input Parameters:
2221: + mat - the matrix
2222: - x - the vector to be multiplied
2224: Output Parameters:
2225: . y - the result
2227: Notes:
2228: The vectors x and y cannot be the same. I.e., one cannot
2229: call MatMult(A,y,y).
2231: Level: developer
2233: Concepts: matrix-vector product
2235: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2236: @*/
2237: PetscErrorCode MatMultDiagonalBlock(Mat mat,Vec x,Vec y)
2238: {
2247: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2248: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2249: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2250: MatCheckPreallocated(mat,1);
2252: if (!mat->ops->multdiagonalblock) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2253: (*mat->ops->multdiagonalblock)(mat,x,y);
2254: PetscObjectStateIncrease((PetscObject)y);
2255: return(0);
2256: }
2258: /* --------------------------------------------------------*/
2259: /*@
2260: MatMult - Computes the matrix-vector product, y = Ax.
2262: Neighbor-wise Collective on Mat and Vec
2264: Input Parameters:
2265: + mat - the matrix
2266: - x - the vector to be multiplied
2268: Output Parameters:
2269: . y - the result
2271: Notes:
2272: The vectors x and y cannot be the same. I.e., one cannot
2273: call MatMult(A,y,y).
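   Example of Usage (a minimal sketch; A is assumed assembled, and error checking is omitted):
.vb
      Mat A;
      Vec x,y;

      MatCreateVecs(A,&x,&y);   /* x is compatible with the columns of A, y with the rows */
      VecSet(x,1.0);
      MatMult(A,x,y);           /* y = A*x; here each entry of y is the corresponding row sum */
      VecDestroy(&x);
      VecDestroy(&y);
.ve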
2275: Level: beginner
2277: Concepts: matrix-vector product
2279: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2280: @*/
2281: PetscErrorCode MatMult(Mat mat,Vec x,Vec y)
2282: {
2290: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2291: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2292: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2293: #if !defined(PETSC_HAVE_CONSTRAINTS)
2294: if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2295: if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2296: if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2297: #endif
2298: VecLocked(y,3);
2299: if (mat->erroriffailure) {VecValidValues(x,2,PETSC_TRUE);}
2300: MatCheckPreallocated(mat,1);
2302: VecLockPush(x);
2303: if (!mat->ops->mult) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2304: PetscLogEventBegin(MAT_Mult,mat,x,y,0);
2305: (*mat->ops->mult)(mat,x,y);
2306: PetscLogEventEnd(MAT_Mult,mat,x,y,0);
2307: if (mat->erroriffailure) {VecValidValues(y,3,PETSC_FALSE);}
2308: VecLockPop(x);
2309: return(0);
2310: }
2312: /*@
2313: MatMultTranspose - Computes matrix transpose times a vector y = A^T * x.
2315: Neighbor-wise Collective on Mat and Vec
2317: Input Parameters:
2318: + mat - the matrix
2319: - x - the vector to be multiplied
2321: Output Parameters:
2322: . y - the result
2324: Notes:
2325: The vectors x and y cannot be the same. I.e., one cannot
2326: call MatMultTranspose(A,y,y).
2328: For complex numbers this does NOT compute the Hermitian (complex conjugate) transpose multiple,
2329: use MatMultHermitianTranspose()
2331: Level: beginner
2333: Concepts: matrix vector product^transpose
2335: .seealso: MatMult(), MatMultAdd(), MatMultTransposeAdd(), MatMultHermitianTranspose(), MatTranspose()
2336: @*/
2337: PetscErrorCode MatMultTranspose(Mat mat,Vec x,Vec y)
2338: {
2347: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2348: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2349: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2350: #if !defined(PETSC_HAVE_CONSTRAINTS)
2351: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2352: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2353: #endif
2354: if (mat->erroriffailure) {VecValidValues(x,2,PETSC_TRUE);}
2355: MatCheckPreallocated(mat,1);
2357: if (!mat->ops->multtranspose) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply transpose defined");
2358: PetscLogEventBegin(MAT_MultTranspose,mat,x,y,0);
2359: VecLockPush(x);
2360: (*mat->ops->multtranspose)(mat,x,y);
2361: VecLockPop(x);
2362: PetscLogEventEnd(MAT_MultTranspose,mat,x,y,0);
2363: PetscObjectStateIncrease((PetscObject)y);
2364: if (mat->erroriffailure) {VecValidValues(y,3,PETSC_FALSE);}
2365: return(0);
2366: }
2368: /*@
2369: MatMultHermitianTranspose - Computes matrix Hermitian transpose times a vector.
2371: Neighbor-wise Collective on Mat and Vec
2373: Input Parameters:
2374: + mat - the matrix
2375: - x - the vector to be multiplied
2377: Output Parameters:
2378: . y - the result
2380: Notes:
2381: The vectors x and y cannot be the same. I.e., one cannot
2382: call MatMultHermitianTranspose(A,y,y).
2384: Also called the conjugate transpose, complex conjugate transpose, or adjoint.
2386: For real numbers MatMultTranspose() and MatMultHermitianTranspose() are identical.
2388: Level: beginner
2390: Concepts: matrix vector product^transpose
2392: .seealso: MatMult(), MatMultAdd(), MatMultHermitianTransposeAdd(), MatMultTranspose()
2393: @*/
2394: PetscErrorCode MatMultHermitianTranspose(Mat mat,Vec x,Vec y)
2395: {
2397: Vec w;
2405: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2406: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2407: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2408: #if !defined(PETSC_HAVE_CONSTRAINTS)
2409: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2410: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2411: #endif
2412: MatCheckPreallocated(mat,1);
2414: PetscLogEventBegin(MAT_MultHermitianTranspose,mat,x,y,0);
2415: if (mat->ops->multhermitiantranspose) {
2416: VecLockPush(x);
2417: (*mat->ops->multhermitiantranspose)(mat,x,y);
2418: VecLockPop(x);
2419: } else {
2420: VecDuplicate(x,&w);
2421: VecCopy(x,w);
2422: VecConjugate(w);
2423: MatMultTranspose(mat,w,y);
2424: VecDestroy(&w);
2425: VecConjugate(y);
2426: }
2427: PetscLogEventEnd(MAT_MultHermitianTranspose,mat,x,y,0);
2428: PetscObjectStateIncrease((PetscObject)y);
2429: return(0);
2430: }
2432: /*@
2433: MatMultAdd - Computes v3 = v2 + A * v1.
2435: Neighbor-wise Collective on Mat and Vec
2437: Input Parameters:
2438: + mat - the matrix
2439: - v1, v2 - the vectors
2441: Output Parameters:
2442: . v3 - the result
2444: Notes:
2445: The vectors v1 and v3 cannot be the same. I.e., one cannot
2446: call MatMultAdd(A,v1,v2,v1).
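   Example of Usage (a minimal sketch; the vectors are assumed to have been obtained with
   MatCreateVecs() or VecDuplicate(), and error checking is omitted):
.vb
      Mat A;
      Vec v1,v2,v3;

      MatMultAdd(A,v1,v2,v3);   /* v3 = v2 + A*v1 */
.ve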
2448: Level: beginner
2450: Concepts: matrix vector product^addition
2452: .seealso: MatMultTranspose(), MatMult(), MatMultTransposeAdd()
2453: @*/
2454: PetscErrorCode MatMultAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2455: {
2465: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2466: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2467: if (mat->cmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->cmap->N,v1->map->N);
2468: /* if (mat->rmap->N != v2->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->rmap->N,v2->map->N);
2469: if (mat->rmap->N != v3->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->rmap->N,v3->map->N); */
2470: if (mat->rmap->n != v3->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: local dim %D %D",mat->rmap->n,v3->map->n);
2471: if (mat->rmap->n != v2->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: local dim %D %D",mat->rmap->n,v2->map->n);
2472: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2473: MatCheckPreallocated(mat,1);
2475: if (!mat->ops->multadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No MatMultAdd() for matrix type '%s'",((PetscObject)mat)->type_name);
2476: PetscLogEventBegin(MAT_MultAdd,mat,v1,v2,v3);
2477: VecLockPush(v1);
2478: (*mat->ops->multadd)(mat,v1,v2,v3);
2479: VecLockPop(v1);
2480: PetscLogEventEnd(MAT_MultAdd,mat,v1,v2,v3);
2481: PetscObjectStateIncrease((PetscObject)v3);
2482: return(0);
2483: }
2485: /*@
2486: MatMultTransposeAdd - Computes v3 = v2 + A' * v1.
2488: Neighbor-wise Collective on Mat and Vec
2490: Input Parameters:
2491: + mat - the matrix
2492: - v1, v2 - the vectors
2494: Output Parameters:
2495: . v3 - the result
2497: Notes:
2498: The vectors v1 and v3 cannot be the same. I.e., one cannot
2499: call MatMultTransposeAdd(A,v1,v2,v1).
2501: Level: beginner
2503: Concepts: matrix vector product^transpose and addition
2505: .seealso: MatMultTranspose(), MatMultAdd(), MatMult()
2506: @*/
2507: PetscErrorCode MatMultTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2508: {
2518: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2519: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2520: if (!mat->ops->multtransposeadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2521: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2522: if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2523: if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2524: if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2525: MatCheckPreallocated(mat,1);
2527: PetscLogEventBegin(MAT_MultTransposeAdd,mat,v1,v2,v3);
2528: VecLockPush(v1);
2529: (*mat->ops->multtransposeadd)(mat,v1,v2,v3);
2530: VecLockPop(v1);
2531: PetscLogEventEnd(MAT_MultTransposeAdd,mat,v1,v2,v3);
2532: PetscObjectStateIncrease((PetscObject)v3);
2533: return(0);
2534: }
2536: /*@
2537: MatMultHermitianTransposeAdd - Computes v3 = v2 + A^H * v1.
2539: Neighbor-wise Collective on Mat and Vec
2541: Input Parameters:
2542: + mat - the matrix
2543: - v1, v2 - the vectors
2545: Output Parameters:
2546: . v3 - the result
2548: Notes:
2549: The vectors v1 and v3 cannot be the same. I.e., one cannot
2550: call MatMultHermitianTransposeAdd(A,v1,v2,v1).
2552: Level: beginner
2554: Concepts: matrix vector product^transpose and addition
2556: .seealso: MatMultHermitianTranspose(), MatMultTranspose(), MatMultAdd(), MatMult()
2557: @*/
2558: PetscErrorCode MatMultHermitianTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2559: {
2569: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2570: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2571: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2572: if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2573: if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2574: if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2575: MatCheckPreallocated(mat,1);
2577: PetscLogEventBegin(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2578: VecLockPush(v1);
2579: if (mat->ops->multhermitiantransposeadd) {
2580: (*mat->ops->multhermitiantransposeadd)(mat,v1,v2,v3);
2581: } else {
2582: Vec w,z;
2583: VecDuplicate(v1,&w);
2584: VecCopy(v1,w);
2585: VecConjugate(w);
2586: VecDuplicate(v3,&z);
2587: MatMultTranspose(mat,w,z);
2588: VecDestroy(&w);
2589: VecConjugate(z);
2590: VecWAXPY(v3,1.0,v2,z);
2591: VecDestroy(&z);
2592: }
2593: VecLockPop(v1);
2594: PetscLogEventEnd(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2595: PetscObjectStateIncrease((PetscObject)v3);
2596: return(0);
2597: }
2599: /*@
2600: MatMultConstrained - The inner multiplication routine for a
2601: constrained matrix P^T A P.
2603: Neighbor-wise Collective on Mat and Vec
2605: Input Parameters:
2606: + mat - the matrix
2607: - x - the vector to be multiplied
2609: Output Parameters:
2610: . y - the result
2612: Notes:
2613: The vectors x and y cannot be the same. I.e., one cannot
2614: call MatMult(A,y,y).
2616: Level: beginner
2618: .keywords: matrix, multiply, matrix-vector product, constraint
2619: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2620: @*/
2621: PetscErrorCode MatMultConstrained(Mat mat,Vec x,Vec y)
2622: {
2629: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2630: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2631: if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2632: if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2633: if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2634: if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2636: PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2637: VecLockPush(x);
2638: (*mat->ops->multconstrained)(mat,x,y);
2639: VecLockPop(x);
2640: PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2641: PetscObjectStateIncrease((PetscObject)y);
2642: return(0);
2643: }
2645: /*@
2646: MatMultTransposeConstrained - The inner multiplication routine for a
2647: constrained matrix P^T A^T P.
2649: Neighbor-wise Collective on Mat and Vec
2651: Input Parameters:
2652: + mat - the matrix
2653: - x - the vector to be multiplied
2655: Output Parameters:
2656: . y - the result
2658: Notes:
2659: The vectors x and y cannot be the same. I.e., one cannot
2660: call MatMult(A,y,y).
2662: Level: beginner
2664: .keywords: matrix, multiply, matrix-vector product, constraint
2665: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2666: @*/
2667: PetscErrorCode MatMultTransposeConstrained(Mat mat,Vec x,Vec y)
2668: {
2675: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2676: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2677: if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2678: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2679: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2681: PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2682: (*mat->ops->multtransposeconstrained)(mat,x,y);
2683: PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2684: PetscObjectStateIncrease((PetscObject)y);
2685: return(0);
2686: }
2688: /*@C
2689: MatGetFactorType - gets the type of factorization the matrix represents
2691: Not Collective
2694: Input Parameters:
2695: . mat - the matrix
2697: Output Parameters:
2698: . t - the type, one of MAT_FACTOR_NONE, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ILU, MAT_FACTOR_ICC,MAT_FACTOR_ILUDT
2700: Level: intermediate
2702: .seealso: MatFactorType, MatGetFactor()
2703: @*/
2704: PetscErrorCode MatGetFactorType(Mat mat,MatFactorType *t)
2705: {
2709: *t = mat->factortype;
2710: return(0);
2711: }
2713: /* ------------------------------------------------------------*/
2714: /*@C
2715: MatGetInfo - Returns information about matrix storage (number of
2716: nonzeros, memory, etc.).
2718: Collective on Mat if MAT_GLOBAL_MAX or MAT_GLOBAL_SUM is used as the flag
2720: Input Parameters:
2721: . mat - the matrix
2723: Output Parameters:
2724: + flag - flag indicating the type of parameters to be returned
2725: (MAT_LOCAL - local matrix, MAT_GLOBAL_MAX - maximum over all processors,
2726: MAT_GLOBAL_SUM - sum over all processors)
2727: - info - matrix information context
2729: Notes:
2730: The MatInfo context contains a variety of matrix data, including
2731: number of nonzeros allocated and used, number of mallocs during
2732: matrix assembly, etc. Additional information for factored matrices
2733: is provided (such as the fill ratio, number of mallocs during
2734: factorization, etc.). Much of this info is printed to PETSC_STDOUT
2735: when using the runtime options
2736: $ -info -mat_view ::ascii_info
2738: Example for C/C++ Users:
2739: See the file ${PETSC_DIR}/include/petscmat.h for a complete list of
2740: data within the MatInfo context. For example,
2741: .vb
2742: MatInfo info;
2743: Mat A;
2744: double mal, nz_a, nz_u;
2746: MatGetInfo(A,MAT_LOCAL,&info);
2747: mal = info.mallocs;
2748: nz_a = info.nz_allocated;
2749: .ve
2751: Example for Fortran Users:
2752: Fortran users should declare info as a double precision
2753: array of dimension MAT_INFO_SIZE, and then extract the parameters
2754: of interest. See the file ${PETSC_DIR}/include/petsc/finclude/petscmat.h
2755: for a complete list of parameter names.
2756: .vb
2757: double precision info(MAT_INFO_SIZE)
2758: double precision mal, nz_a
2759: Mat A
2760: integer ierr
2762: call MatGetInfo(A,MAT_LOCAL,info,ierr)
2763: mal = info(MAT_INFO_MALLOCS)
2764: nz_a = info(MAT_INFO_NZ_ALLOCATED)
2765: .ve
2767: Level: intermediate
2769: Concepts: matrices^getting information on
2771: Developer Note: fortran interface is not autogenerated as the f90
2772: interface definition cannot be generated correctly [due to MatInfo]
2774: .seealso: MatStashGetInfo()
2776: @*/
2777: PetscErrorCode MatGetInfo(Mat mat,MatInfoType flag,MatInfo *info)
2778: {
2785: if (!mat->ops->getinfo) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2786: MatCheckPreallocated(mat,1);
2787: (*mat->ops->getinfo)(mat,flag,info);
2788: return(0);
2789: }
2791: /*
2792: This is used by external packages where it is not easy to get the info from the actual
2793: matrix factorization.
2794: */
2795: PetscErrorCode MatGetInfo_External(Mat A,MatInfoType flag,MatInfo *info)
2796: {
2800: PetscMemzero(info,sizeof(MatInfo));
2801: return(0);
2802: }
2804: /* ----------------------------------------------------------*/
2806: /*@C
2807: MatLUFactor - Performs in-place LU factorization of matrix.
2809: Collective on Mat
2811: Input Parameters:
2812: + mat - the matrix
2813: . row - row permutation
2814: . col - column permutation
2815: - info - options for factorization, includes
2816: $ fill - expected fill as ratio of original fill.
2817: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2818: $ Run with the option -info to determine an optimal value to use
2820: Notes:
2821: Most users should employ the simplified KSP interface for linear solvers
2822: instead of working directly with matrix algebra routines such as this.
2823: See, e.g., KSPCreate().
2825: This changes the state of the matrix to a factored matrix; it cannot be used
2826: for example with MatSetValues() unless one first calls MatSetUnfactored().
2828: Level: developer
2830: Concepts: matrices^LU factorization
2832: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(),
2833: MatGetOrdering(), MatSetUnfactored(), MatFactorInfo, MatGetFactor()
2835: Developer Note: fortran interface is not autogenerated as the f90
2836: interface definition cannot be generated correctly [due to MatFactorInfo]
2838: @*/
2839: PetscErrorCode MatLUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2840: {
2842: MatFactorInfo tinfo;
2850: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2851: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2852: if (!mat->ops->lufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2853: MatCheckPreallocated(mat,1);
2854: if (!info) {
2855: MatFactorInfoInitialize(&tinfo);
2856: info = &tinfo;
2857: }
2859: PetscLogEventBegin(MAT_LUFactor,mat,row,col,0);
2860: (*mat->ops->lufactor)(mat,row,col,info);
2861: PetscLogEventEnd(MAT_LUFactor,mat,row,col,0);
2862: PetscObjectStateIncrease((PetscObject)mat);
2863: return(0);
2864: }
2866: /*@C
2867: MatILUFactor - Performs in-place ILU factorization of matrix.
2869: Collective on Mat
2871: Input Parameters:
2872: + mat - the matrix
2873: . row - row permutation
2874: . col - column permutation
2875: - info - structure containing
2876: $ levels - number of levels of fill.
2877: $ expected fill - as ratio of original fill.
2878: $ 1 or 0 - indicating force fill on diagonal (improves robustness for matrices
2879: missing diagonal entries)
2881: Notes:
2882: This is probably really in-place only when the level of fill is zero; otherwise it allocates
2883: new space to store the factored matrix and deletes the previous memory.
2885: Most users should employ the simplified KSP interface for linear solvers
2886: instead of working directly with matrix algebra routines such as this.
2887: See, e.g., KSPCreate().
2889: Level: developer
2891: Concepts: matrices^ILU factorization
2893: .seealso: MatILUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
2895: Developer Note: fortran interface is not autogenerated as the f90
2896: interface definition cannot be generated correctly [due to MatFactorInfo]
2898: @*/
2899: PetscErrorCode MatILUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2900: {
2909: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
2910: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2911: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2912: if (!mat->ops->ilufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2913: MatCheckPreallocated(mat,1);
2915: PetscLogEventBegin(MAT_ILUFactor,mat,row,col,0);
2916: (*mat->ops->ilufactor)(mat,row,col,info);
2917: PetscLogEventEnd(MAT_ILUFactor,mat,row,col,0);
2918: PetscObjectStateIncrease((PetscObject)mat);
2919: return(0);
2920: }
2922: /*@C
2923: MatLUFactorSymbolic - Performs symbolic LU factorization of matrix.
2924: Call this routine before calling MatLUFactorNumeric().
2926: Collective on Mat
2928: Input Parameters:
2929: + fact - the factor matrix obtained with MatGetFactor()
2930: . mat - the matrix
2931: . row, col - row and column permutations
2932: - info - options for factorization, includes
2933: $ fill - expected fill as ratio of original fill.
2934: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2935: $ Run with the option -info to determine an optimal value to use
2938: Notes: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.
2940: Most users should employ the simplified KSP interface for linear solvers
2941: instead of working directly with matrix algebra routines such as this.
2942: See, e.g., KSPCreate().
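   Example of Usage (a minimal sketch of the complete out-of-place LU sequence; b and x are
   assumed to be vectors compatible with A, and error checking and object destruction are omitted):
.vb
      Mat           A,F;
      IS            row,col;
      MatFactorInfo info;
      Vec           b,x;

      MatFactorInfoInitialize(&info);
      MatGetOrdering(A,MATORDERINGND,&row,&col);
      MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
      MatLUFactorSymbolic(F,A,row,col,&info);
      MatLUFactorNumeric(F,A,&info);
      MatSolve(F,b,x);                 /* solve A x = b using the factors stored in F */
.ve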
2944: Level: developer
2946: Concepts: matrices^LU symbolic factorization
2948: .seealso: MatLUFactor(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo, MatFactorInfoInitialize()
2950: Developer Note: fortran interface is not autogenerated as the f90
2951: interface definition cannot be generated correctly [due to MatFactorInfo]
2953: @*/
2954: PetscErrorCode MatLUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
2955: {
2965: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2966: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2967: if (!(fact)->ops->lufactorsymbolic) {
2968: MatSolverType spackage;
2969: MatFactorGetSolverType(fact,&spackage);
2970: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic LU using solver package %s",((PetscObject)mat)->type_name,spackage);
2971: }
2972: MatCheckPreallocated(mat,2);
2974: PetscLogEventBegin(MAT_LUFactorSymbolic,mat,row,col,0);
2975: (fact->ops->lufactorsymbolic)(fact,mat,row,col,info);
2976: PetscLogEventEnd(MAT_LUFactorSymbolic,mat,row,col,0);
2977: PetscObjectStateIncrease((PetscObject)fact);
2978: return(0);
2979: }
2981: /*@C
2982: MatLUFactorNumeric - Performs numeric LU factorization of a matrix.
2983: Call this routine after first calling MatLUFactorSymbolic().
2985: Collective on Mat
2987: Input Parameters:
2988: + fact - the factor matrix obtained with MatGetFactor()
2989: . mat - the matrix
2990: - info - options for factorization
2992: Notes:
2993: See MatLUFactor() for in-place factorization. See
2994: MatCholeskyFactorNumeric() for the symmetric, positive definite case.
2996: Most users should employ the simplified KSP interface for linear solvers
2997: instead of working directly with matrix algebra routines such as this.
2998: See, e.g., KSPCreate().
3000: Level: developer
3002: Concepts: matrices^LU numeric factorization
3004: .seealso: MatLUFactorSymbolic(), MatLUFactor(), MatCholeskyFactor()
3006: Developer Note: fortran interface is not autogenerated as the f90
3007: interface definition cannot be generated correctly [due to MatFactorInfo]
3009: @*/
3010: PetscErrorCode MatLUFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3011: {
3019: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3020: if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dimensions are different %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
3022: if (!(fact)->ops->lufactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric LU",((PetscObject)mat)->type_name);
3023: MatCheckPreallocated(mat,2);
3024: PetscLogEventBegin(MAT_LUFactorNumeric,mat,fact,0,0);
3025: (fact->ops->lufactornumeric)(fact,mat,info);
3026: PetscLogEventEnd(MAT_LUFactorNumeric,mat,fact,0,0);
3027: MatViewFromOptions(fact,NULL,"-mat_factor_view");
3028: PetscObjectStateIncrease((PetscObject)fact);
3029: return(0);
3030: }
3032: /*@C
3033: MatCholeskyFactor - Performs in-place Cholesky factorization of a
3034: symmetric matrix.
3036: Collective on Mat
3038: Input Parameters:
3039: + mat - the matrix
3040: . perm - row and column permutations
3041: - f - expected fill as ratio of original fill
3043: Notes:
3044: See MatLUFactor() for the nonsymmetric case. See also
3045: MatCholeskyFactorSymbolic(), and MatCholeskyFactorNumeric().
3047: Most users should employ the simplified KSP interface for linear solvers
3048: instead of working directly with matrix algebra routines such as this.
3049: See, e.g., KSPCreate().
3051: Level: developer
3053: Concepts: matrices^Cholesky factorization
3055: .seealso: MatLUFactor(), MatCholeskyFactorSymbolic(), MatCholeskyFactorNumeric()
3056: MatGetOrdering()
3058: Developer Note: fortran interface is not autogenerated as the f90
3059: interface definition cannot be generated correctly [due to MatFactorInfo]
3061: @*/
3062: PetscErrorCode MatCholeskyFactor(Mat mat,IS perm,const MatFactorInfo *info)
3063: {
3071: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3072: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3073: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3074: if (!mat->ops->choleskyfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"In-place factorization for Mat type %s is not supported, try out-of-place factorization. See MatCholeskyFactorSymbolic/Numeric",((PetscObject)mat)->type_name);
3075: MatCheckPreallocated(mat,1);
3077: PetscLogEventBegin(MAT_CholeskyFactor,mat,perm,0,0);
3078: (*mat->ops->choleskyfactor)(mat,perm,info);
3079: PetscLogEventEnd(MAT_CholeskyFactor,mat,perm,0,0);
3080: PetscObjectStateIncrease((PetscObject)mat);
3081: return(0);
3082: }
3084: /*@C
3085: MatCholeskyFactorSymbolic - Performs symbolic Cholesky factorization
3086: of a symmetric matrix.
3088: Collective on Mat
3090: Input Parameters:
3091: + fact - the factor matrix obtained with MatGetFactor()
3092: . mat - the matrix
3093: . perm - row and column permutations
3094: - info - options for factorization, includes
3095: $ fill - expected fill as ratio of original fill.
3096: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
3097: $ Run with the option -info to determine an optimal value to use
3099: Notes:
3100: See MatLUFactorSymbolic() for the nonsymmetric case. See also
3101: MatCholeskyFactor() and MatCholeskyFactorNumeric().
3103: Most users should employ the simplified KSP interface for linear solvers
3104: instead of working directly with matrix algebra routines such as this.
3105: See, e.g., KSPCreate().
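   Example of Usage (a minimal sketch of the out-of-place Cholesky sequence; A is assumed
   symmetric and assembled, and error checking and object destruction are omitted):
.vb
      Mat           A,F;
      IS            rperm,cperm;
      MatFactorInfo info;

      MatFactorInfoInitialize(&info);
      MatGetOrdering(A,MATORDERINGNATURAL,&rperm,&cperm);   /* only the row permutation is used below */
      MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_CHOLESKY,&F);
      MatCholeskyFactorSymbolic(F,A,rperm,&info);
      MatCholeskyFactorNumeric(F,A,&info);
      /* F can now be used with MatSolve() or MatMatSolve() */
.ve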
3107: Level: developer
3109: Concepts: matrices^Cholesky symbolic factorization
3111: .seealso: MatLUFactorSymbolic(), MatCholeskyFactor(), MatCholeskyFactorNumeric()
3112: MatGetOrdering()
3114: Developer Note: fortran interface is not autogenerated as the f90
3115: interface definition cannot be generated correctly [due to MatFactorInfo]
3117: @*/
3118: PetscErrorCode MatCholeskyFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
3119: {
3128: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3129: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3130: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3131: if (!(fact)->ops->choleskyfactorsymbolic) {
3132: MatSolverType spackage;
3133: MatFactorGetSolverType(fact,&spackage);
3134: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s symbolic factor Cholesky using solver package %s",((PetscObject)mat)->type_name,spackage);
3135: }
3136: MatCheckPreallocated(mat,2);
3138: PetscLogEventBegin(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3139: (fact->ops->choleskyfactorsymbolic)(fact,mat,perm,info);
3140: PetscLogEventEnd(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3141: PetscObjectStateIncrease((PetscObject)fact);
3142: return(0);
3143: }
3145: /*@C
3146: MatCholeskyFactorNumeric - Performs numeric Cholesky factorization
3147: of a symmetric matrix. Call this routine after first calling
3148: MatCholeskyFactorSymbolic().
3150: Collective on Mat
3152: Input Parameters:
3153: + fact - the factor matrix obtained with MatGetFactor(), containing the symbolic factor computed by MatCholeskyFactorSymbolic()
3154: . mat - the initial matrix
3155: - info - options for factorization
3159: Notes:
3160: Most users should employ the simplified KSP interface for linear solvers
3161: instead of working directly with matrix algebra routines such as this.
3162: See, e.g., KSPCreate().
3164: Level: developer
3166: Concepts: matrices^Cholesky numeric factorization
3168: .seealso: MatCholeskyFactorSymbolic(), MatCholeskyFactor(), MatLUFactorNumeric()
3170: Developer Note: fortran interface is not autogenerated as the f90
3171: interface definition cannot be generated correctly [due to MatFactorInfo]
3173: @*/
3174: PetscErrorCode MatCholeskyFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3175: {
3183: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3184: if (!(fact)->ops->choleskyfactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric factor Cholesky",((PetscObject)mat)->type_name);
3185: if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dim %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
3186: MatCheckPreallocated(mat,2);
3188: PetscLogEventBegin(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3189: (fact->ops->choleskyfactornumeric)(fact,mat,info);
3190: PetscLogEventEnd(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3191: MatViewFromOptions(fact,NULL,"-mat_factor_view");
3192: PetscObjectStateIncrease((PetscObject)fact);
3193: return(0);
3194: }
3196: /* ----------------------------------------------------------------*/
3197: /*@
3198: MatSolve - Solves A x = b, given a factored matrix.
3200: Neighbor-wise Collective on Mat and Vec
3202: Input Parameters:
3203: + mat - the factored matrix
3204: - b - the right-hand-side vector
3206: Output Parameter:
3207: . x - the result vector
3209: Notes:
3210: The vectors b and x cannot be the same. I.e., one cannot
3211: call MatSolve(A,x,x).
3214: Most users should employ the simplified KSP interface for linear solvers
3215: instead of working directly with matrix algebra routines such as this.
3216: See, e.g., KSPCreate().
3218: Level: developer
3220: Concepts: matrices^triangular solves
3222: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd()
3223: @*/
3224: PetscErrorCode MatSolve(Mat mat,Vec b,Vec x)
3225: {
3235: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3236: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3237: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3238: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3239: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3240: if (!mat->rmap->N && !mat->cmap->N) return(0);
3241: if (!mat->ops->solve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3242: MatCheckPreallocated(mat,1);
3244: PetscLogEventBegin(MAT_Solve,mat,b,x,0);
3245: if (mat->factorerrortype) {
3246: PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3247: VecSetInf(x);
3248: } else {
3249: (*mat->ops->solve)(mat,b,x);
3250: }
3251: PetscLogEventEnd(MAT_Solve,mat,b,x,0);
3252: PetscObjectStateIncrease((PetscObject)x);
3253: return(0);
3254: }
3256: static PetscErrorCode MatMatSolve_Basic(Mat A,Mat B,Mat X, PetscBool trans)
3257: {
3259: Vec b,x;
3260: PetscInt m,N,i;
3261: PetscScalar *bb,*xx;
3262: PetscBool flg;
3265: PetscObjectTypeCompareAny((PetscObject)B,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3266: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix B must be MATDENSE matrix");
3267: PetscObjectTypeCompareAny((PetscObject)X,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3268: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix X must be MATDENSE matrix");
3270: MatDenseGetArray(B,&bb);
3271: MatDenseGetArray(X,&xx);
3272: MatGetLocalSize(B,&m,NULL); /* number local rows */
3273: MatGetSize(B,NULL,&N); /* total columns in dense matrix */
3274: MatCreateVecs(A,&x,&b);
3275: for (i=0; i<N; i++) {
3276: VecPlaceArray(b,bb + i*m);
3277: VecPlaceArray(x,xx + i*m);
3278: if (trans) {
3279: MatSolveTranspose(A,b,x);
3280: } else {
3281: MatSolve(A,b,x);
3282: }
3283: VecResetArray(x);
3284: VecResetArray(b);
3285: }
3286: VecDestroy(&b);
3287: VecDestroy(&x);
3288: MatDenseRestoreArray(B,&bb);
3289: MatDenseRestoreArray(X,&xx);
3290: return(0);
3291: }
3293: /*@
3294: MatMatSolve - Solves A X = B, given a factored matrix.
3296: Neighbor-wise Collective on Mat
3298: Input Parameters:
3299: + A - the factored matrix
3300: - B - the right-hand-side matrix (dense matrix)
3302: Output Parameter:
3303: . X - the result matrix (dense matrix)
3305: Notes:
3306: The matrices B and X cannot be the same. I.e., one cannot
3307: call MatMatSolve(A,X,X).
3310: Most users should usually employ the simplified KSP interface for linear solvers
3311: instead of working directly with matrix algebra routines such as this.
3312: See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3313: at a time.
3315: When using SuperLU_Dist as a parallel solver PETSc will use the SuperLU_Dist functionality to solve multiple right hand sides simultaneously. For MUMPS
3316: it calls a separate solve for each right hand side since MUMPS does not yet support distributed right hand sides.
3318: Since the resulting matrix X must always be dense we do not support sparse representation of the matrix B.
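   Example of Usage (a minimal sketch; F is assumed to hold LU or Cholesky factors of A obtained
   with MatGetFactor() and the factorization routines, B is a dense matrix of right-hand sides,
   and error checking is omitted):
.vb
      Mat F,B,X;

      MatDuplicate(B,MAT_DO_NOT_COPY_VALUES,&X);   /* X gets the same dense layout as B */
      MatMatSolve(F,B,X);                          /* X = A^{-1} B */
.ve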
3320: Level: developer
3322: Concepts: matrices^triangular solves
3324: .seealso: MatMatSolveTranspose(), MatLUFactor(), MatCholeskyFactor()
3325: @*/
3326: PetscErrorCode MatMatSolve(Mat A,Mat B,Mat X)
3327: {
3337: if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3338: if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3339: if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3340: if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3341: if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat A,Mat B: local dim %D %D",A->rmap->n,B->rmap->n);
3342: if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3343: if (!A->rmap->N && !A->cmap->N) return(0);
3344: MatCheckPreallocated(A,1);
3346: PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3347: if (!A->ops->matsolve) {
3348: PetscInfo1(A,"Mat type %s using basic MatMatSolve\n",((PetscObject)A)->type_name);
3349: MatMatSolve_Basic(A,B,X,PETSC_FALSE);
3350: } else {
3351: (*A->ops->matsolve)(A,B,X);
3352: }
3353: PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3354: PetscObjectStateIncrease((PetscObject)X);
3355: return(0);
3356: }
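/*
   Example of usage: solving several right-hand sides at once with MatMatSolve(). A
   minimal sketch, assuming a sequential factored matrix F of size n (obtained as for
   MatSolve() above) and a column count nrhs chosen by the caller.

.vb
      Mat B,X;     // right-hand sides and solutions stored as columns of dense matrices

      ierr = MatCreateSeqDense(PETSC_COMM_SELF,n,nrhs,NULL,&B);CHKERRQ(ierr);
      // ... fill B with MatSetValues() and assemble it ...
      ierr = MatDuplicate(B,MAT_DO_NOT_COPY_VALUES,&X);CHKERRQ(ierr);
      ierr = MatMatSolve(F,B,X);CHKERRQ(ierr);         // X = inv(A) B
      ierr = MatDestroy(&B);CHKERRQ(ierr);
      ierr = MatDestroy(&X);CHKERRQ(ierr);
.ve
*/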
3358: /*@
3359: MatMatSolveTranspose - Solves A^T X = B, given a factored matrix.
3361: Neighbor-wise Collective on Mat
3363: Input Parameters:
3364: + A - the factored matrix
3365: - B - the right-hand-side matrix (dense matrix)
3367: Output Parameter:
3368: . X - the result matrix (dense matrix)
3370: Notes:
3371: The matrices B and X cannot be the same, i.e., one cannot
3372: call MatMatSolveTranspose(A,X,X).
3375: Most users should usually employ the simplified KSP interface for linear solvers
3376: instead of working directly with matrix algebra routines such as this.
3377: See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3378: at a time.
3380: When using SuperLU_Dist as a parallel solver PETSc will use the SuperLU_Dist functionality to solve multiple right hand sides simultaneously. For MUMPS
3381: it calls a separate solve for each right hand side since MUMPS does not yet support distributed right hand sides.
3383: Since the resulting matrix X must always be dense we do not support sparse representation of the matrix B.
3385: Level: developer
3387: Concepts: matrices^triangular solves
3389: .seealso: MatMatSolve(), MatLUFactor(), MatCholeskyFactor()
3390: @*/
3391: PetscErrorCode MatMatSolveTranspose(Mat A,Mat B,Mat X)
3392: {
3402: if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3403: if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3404: if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3405: if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3406: if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat A,Mat B: local dim %D %D",A->rmap->n,B->rmap->n);
3407: if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3408: if (!A->rmap->N && !A->cmap->N) return(0);
3409: MatCheckPreallocated(A,1);
3411: PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3412: if (!A->ops->matsolvetranspose) {
3413: PetscInfo1(A,"Mat type %s using basic MatMatSolveTranspose\n",((PetscObject)A)->type_name);
3414: MatMatSolve_Basic(A,B,X,PETSC_TRUE);
3415: } else {
3416: (*A->ops->matsolvetranspose)(A,B,X);
3417: }
3418: PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3419: PetscObjectStateIncrease((PetscObject)X);
3420: return(0);
3421: }
3423: /*@
3424: MatForwardSolve - Solves L x = b, given a factored matrix, A = LU, or
3425: U^T*D^(1/2) x = b, given a factored symmetric matrix, A = U^T*D*U.
3427: Neighbor-wise Collective on Mat and Vec
3429: Input Parameters:
3430: + mat - the factored matrix
3431: - b - the right-hand-side vector
3433: Output Parameter:
3434: . x - the result vector
3436: Notes:
3437: MatSolve() should be used for most applications, as it performs
3438: a forward solve followed by a backward solve.
3440: The vectors b and x cannot be the same, i.e., one cannot
3441: call MatForwardSolve(A,x,x).
3443: For matrices in seqsbaij format with block size larger than 1,
3444: the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3445: MatForwardSolve() solves U^T*D y = b, and
3446: MatBackwardSolve() solves U x = y.
3447: Thus they do not provide a symmetric preconditioner.
3449: Most users should employ the simplified KSP interface for linear solvers
3450: instead of working directly with matrix algebra routines such as this.
3451: See, e.g., KSPCreate().
3453: Level: developer
3455: Concepts: matrices^forward solves
3457: .seealso: MatSolve(), MatBackwardSolve()
3458: @*/
3459: PetscErrorCode MatForwardSolve(Mat mat,Vec b,Vec x)
3460: {
3470: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3471: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3472: if (!mat->ops->forwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3473: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3474: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3475: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3476: MatCheckPreallocated(mat,1);
3477: PetscLogEventBegin(MAT_ForwardSolve,mat,b,x,0);
3478: (*mat->ops->forwardsolve)(mat,b,x);
3479: PetscLogEventEnd(MAT_ForwardSolve,mat,b,x,0);
3480: PetscObjectStateIncrease((PetscObject)x);
3481: return(0);
3482: }
3484: /*@
3485: MatBackwardSolve - Solves U x = b, given a factored matrix, A = LU, or
3486: D^(1/2) U x = b, given a factored symmetric matrix, A = U^T*D*U.
3488: Neighbor-wise Collective on Mat and Vec
3490: Input Parameters:
3491: + mat - the factored matrix
3492: - b - the right-hand-side vector
3494: Output Parameter:
3495: . x - the result vector
3497: Notes:
3498: MatSolve() should be used for most applications, as it performs
3499: a forward solve followed by a backward solve.
3501: The vectors b and x cannot be the same. I.e., one cannot
3502: call MatBackwardSolve(A,x,x).
3504: For matrices in seqsbaij format with block size larger than 1,
3505: the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3506: MatForwardSolve() solves U^T*D y = b, and
3507: MatBackwardSolve() solves U x = y.
3508: Thus they do not provide a symmetric preconditioner.
3510: Most users should employ the simplified KSP interface for linear solvers
3511: instead of working directly with matrix algebra routines such as this.
3512: See, e.g., KSPCreate().
3514: Level: developer
3516: Concepts: matrices^backward solves
3518: .seealso: MatSolve(), MatForwardSolve()
3519: @*/
3520: PetscErrorCode MatBackwardSolve(Mat mat,Vec b,Vec x)
3521: {
3531: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3532: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3533: if (!mat->ops->backwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3534: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3535: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3536: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3537: MatCheckPreallocated(mat,1);
3539: PetscLogEventBegin(MAT_BackwardSolve,mat,b,x,0);
3540: (*mat->ops->backwardsolve)(mat,b,x);
3541: PetscLogEventEnd(MAT_BackwardSolve,mat,b,x,0);
3542: PetscObjectStateIncrease((PetscObject)x);
3543: return(0);
3544: }
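/*
   Example of usage: splitting a solve into its two triangular stages. A minimal sketch,
   assuming a factored matrix F and vectors b and x; the pair of calls is equivalent to
   a single MatSolve(F,b,x).

.vb
      Vec y;                                           // intermediate vector

      ierr = VecDuplicate(x,&y);CHKERRQ(ierr);
      ierr = MatForwardSolve(F,b,y);CHKERRQ(ierr);     // L y = b   (or U^T D y = b)
      ierr = MatBackwardSolve(F,y,x);CHKERRQ(ierr);    // U x = y
      ierr = VecDestroy(&y);CHKERRQ(ierr);
.ve
*/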
3546: /*@
3547: MatSolveAdd - Computes x = y + inv(A)*b, given a factored matrix.
3549: Neighbor-wise Collective on Mat and Vec
3551: Input Parameters:
3552: + mat - the factored matrix
3553: . b - the right-hand-side vector
3554: - y - the vector to be added to
3556: Output Parameter:
3557: . x - the result vector
3559: Notes:
3560: The vectors b and x cannot be the same. I.e., one cannot
3561: call MatSolveAdd(A,x,y,x).
3563: Most users should employ the simplified KSP interface for linear solvers
3564: instead of working directly with matrix algebra routines such as this.
3565: See, e.g., KSPCreate().
3567: Level: developer
3569: Concepts: matrices^triangular solves
3571: .seealso: MatSolve(), MatSolveTranspose(), MatSolveTransposeAdd()
3572: @*/
3573: PetscErrorCode MatSolveAdd(Mat mat,Vec b,Vec y,Vec x)
3574: {
3575: PetscScalar one = 1.0;
3576: Vec tmp;
3588: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3589: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3590: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3591: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3592: if (mat->rmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
3593: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3594: if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3595: MatCheckPreallocated(mat,1);
3597: PetscLogEventBegin(MAT_SolveAdd,mat,b,x,y);
3598: if (mat->ops->solveadd) {
3599: (*mat->ops->solveadd)(mat,b,y,x);
3600: } else {
3601: /* do the solve then the add manually */
3602: if (x != y) {
3603: MatSolve(mat,b,x);
3604: VecAXPY(x,one,y);
3605: } else {
3606: VecDuplicate(x,&tmp);
3607: PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3608: VecCopy(x,tmp);
3609: MatSolve(mat,b,x);
3610: VecAXPY(x,one,tmp);
3611: VecDestroy(&tmp);
3612: }
3613: }
3614: PetscLogEventEnd(MAT_SolveAdd,mat,b,x,y);
3615: PetscObjectStateIncrease((PetscObject)x);
3616: return(0);
3617: }
3619: /*@
3620: MatSolveTranspose - Solves A' x = b, given a factored matrix.
3622: Neighbor-wise Collective on Mat and Vec
3624: Input Parameters:
3625: + mat - the factored matrix
3626: - b - the right-hand-side vector
3628: Output Parameter:
3629: . x - the result vector
3631: Notes:
3632: The vectors b and x cannot be the same. I.e., one cannot
3633: call MatSolveTranspose(A,x,x).
3635: Most users should employ the simplified KSP interface for linear solvers
3636: instead of working directly with matrix algebra routines such as this.
3637: See, e.g., KSPCreate().
3639: Level: developer
3641: Concepts: matrices^triangular solves
3643: .seealso: MatSolve(), MatSolveAdd(), MatSolveTransposeAdd()
3644: @*/
3645: PetscErrorCode MatSolveTranspose(Mat mat,Vec b,Vec x)
3646: {
3656: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3657: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3658: if (!mat->ops->solvetranspose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s",((PetscObject)mat)->type_name);
3659: if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3660: if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3661: MatCheckPreallocated(mat,1);
3662: PetscLogEventBegin(MAT_SolveTranspose,mat,b,x,0);
3663: if (mat->factorerrortype) {
3664: PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3665: VecSetInf(x);
3666: } else {
3667: (*mat->ops->solvetranspose)(mat,b,x);
3668: }
3669: PetscLogEventEnd(MAT_SolveTranspose,mat,b,x,0);
3670: PetscObjectStateIncrease((PetscObject)x);
3671: return(0);
3672: }
3674: /*@
3675: MatSolveTransposeAdd - Computes x = y + inv(Transpose(A)) b, given a
3676: factored matrix.
3678: Neighbor-wise Collective on Mat and Vec
3680: Input Parameters:
3681: + mat - the factored matrix
3682: . b - the right-hand-side vector
3683: - y - the vector to be added to
3685: Output Parameter:
3686: . x - the result vector
3688: Notes:
3689: The vectors b and x cannot be the same. I.e., one cannot
3690: call MatSolveTransposeAdd(A,x,y,x).
3692: Most users should employ the simplified KSP interface for linear solvers
3693: instead of working directly with matrix algebra routines such as this.
3694: See, e.g., KSPCreate().
3696: Level: developer
3698: Concepts: matrices^triangular solves
3700: .seealso: MatSolve(), MatSolveAdd(), MatSolveTranspose()
3701: @*/
3702: PetscErrorCode MatSolveTransposeAdd(Mat mat,Vec b,Vec y,Vec x)
3703: {
3704: PetscScalar one = 1.0;
3706: Vec tmp;
3717: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3718: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3719: if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3720: if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3721: if (mat->cmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
3722: if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3723: MatCheckPreallocated(mat,1);
3725: PetscLogEventBegin(MAT_SolveTransposeAdd,mat,b,x,y);
3726: if (mat->ops->solvetransposeadd) {
3727: if (mat->factorerrortype) {
3728: PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3729: VecSetInf(x);
3730: } else {
3731: (*mat->ops->solvetransposeadd)(mat,b,y,x);
3732: }
3733: } else {
3734: /* do the solve then the add manually */
3735: if (x != y) {
3736: MatSolveTranspose(mat,b,x);
3737: VecAXPY(x,one,y);
3738: } else {
3739: VecDuplicate(x,&tmp);
3740: PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3741: VecCopy(x,tmp);
3742: MatSolveTranspose(mat,b,x);
3743: VecAXPY(x,one,tmp);
3744: VecDestroy(&tmp);
3745: }
3746: }
3747: PetscLogEventEnd(MAT_SolveTransposeAdd,mat,b,x,y);
3748: PetscObjectStateIncrease((PetscObject)x);
3749: return(0);
3750: }
3751: /* ----------------------------------------------------------------*/
3753: /*@
3754: MatSOR - Computes relaxation (SOR, Gauss-Seidel) sweeps.
3756: Neighbor-wise Collective on Mat and Vec
3758: Input Parameters:
3759: + mat - the matrix
3760: . b - the right hand side
3761: . omega - the relaxation factor
3762: . flag - flag indicating the type of SOR (see below)
3763: . shift - diagonal shift
3764: . its - the number of iterations
3765: - lits - the number of local iterations
3767: Output Parameters:
3768: . x - the solution (can contain an initial guess, use option SOR_ZERO_INITIAL_GUESS to indicate no guess)
3770: SOR Flags:
3771: . SOR_FORWARD_SWEEP - forward SOR
3772: . SOR_BACKWARD_SWEEP - backward SOR
3773: . SOR_SYMMETRIC_SWEEP - SSOR (symmetric SOR)
3774: . SOR_LOCAL_FORWARD_SWEEP - local forward SOR
3775: . SOR_LOCAL_BACKWARD_SWEEP - local backward SOR
3776: . SOR_LOCAL_SYMMETRIC_SWEEP - local SSOR
3777: . SOR_APPLY_UPPER, SOR_APPLY_LOWER - applies
3778: upper/lower triangular part of matrix to
3779: vector (with omega)
3780: . SOR_ZERO_INITIAL_GUESS - zero initial guess
3782: Notes:
3783: SOR_LOCAL_FORWARD_SWEEP, SOR_LOCAL_BACKWARD_SWEEP, and
3784: SOR_LOCAL_SYMMETRIC_SWEEP perform separate independent smoothings
3785: on each processor.
3787: Application programmers will not generally use MatSOR() directly,
3788: but instead will employ the KSP/PC interface.
3790: Notes: for BAIJ, SBAIJ, and AIJ matrices with Inodes this does a block SOR smoothing, otherwise it does a pointwise smoothing
3792: Notes for Advanced Users:
3793: The flags are implemented as bitwise inclusive or operations.
3794: For example, use (SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP)
3795: to specify a zero initial guess for SSOR.
3797: Most users should employ the simplified KSP interface for linear solvers
3798: instead of working directly with matrix algebra routines such as this.
3799: See, e.g., KSPCreate().
3801: Vectors x and b CANNOT be the same
3803: Developer Note: We should add block SOR support for AIJ matrices with block size set to greater than one and no inodes
3805: Level: developer
3807: Concepts: matrices^relaxation
3808: Concepts: matrices^SOR
3809: Concepts: matrices^Gauss-Seidel
3811: @*/
3812: PetscErrorCode MatSOR(Mat mat,Vec b,PetscReal omega,MatSORType flag,PetscReal shift,PetscInt its,PetscInt lits,Vec x)
3813: {
3823: if (!mat->ops->sor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3824: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3825: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3826: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3827: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3828: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3829: if (its <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D positive",its);
3830: if (lits <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires local its %D positive",lits);
3831: if (b == x) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_IDN,"b and x vector cannot be the same");
3833: MatCheckPreallocated(mat,1);
3834: PetscLogEventBegin(MAT_SOR,mat,b,x,0);
3835: ierr =(*mat->ops->sor)(mat,b,omega,flag,shift,its,lits,x);
3836: PetscLogEventEnd(MAT_SOR,mat,b,x,0);
3837: PetscObjectStateIncrease((PetscObject)x);
3838: return(0);
3839: }
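/*
   Example of usage: one SSOR sweep with a zero initial guess, combining the flags with a
   bitwise or as described above. A minimal sketch, assuming an assembled matrix A and
   vectors b and x; in practice this is normally driven through KSP/PCSOR.

.vb
      PetscInt its = 1, lits = 1;

      ierr = MatSOR(A,b,1.0,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP),0.0,its,lits,x);CHKERRQ(ierr);
.ve
*/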
3841: /*
3842: Default matrix copy routine.
3843: */
3844: PetscErrorCode MatCopy_Basic(Mat A,Mat B,MatStructure str)
3845: {
3846: PetscErrorCode ierr;
3847: PetscInt i,rstart = 0,rend = 0,nz;
3848: const PetscInt *cwork;
3849: const PetscScalar *vwork;
3852: if (B->assembled) {
3853: MatZeroEntries(B);
3854: }
3855: MatGetOwnershipRange(A,&rstart,&rend);
3856: for (i=rstart; i<rend; i++) {
3857: MatGetRow(A,i,&nz,&cwork,&vwork);
3858: MatSetValues(B,1,&i,nz,cwork,vwork,INSERT_VALUES);
3859: MatRestoreRow(A,i,&nz,&cwork,&vwork);
3860: }
3861: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3862: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3863: return(0);
3864: }
3866: /*@
3867: MatCopy - Copies a matrix to another matrix.
3869: Collective on Mat
3871: Input Parameters:
3872: + A - the matrix
3873: - str - SAME_NONZERO_PATTERN or DIFFERENT_NONZERO_PATTERN
3875: Output Parameter:
3876: . B - where the copy is put
3878: Notes:
3879: If you use SAME_NONZERO_PATTERN then the two matrices must have the
3880: same nonzero pattern or the routine will crash.
3882: MatCopy() copies the matrix entries of a matrix to another existing
3883: matrix (after first zeroing the second matrix). A related routine is
3884: MatConvert(), which first creates a new matrix and then copies the data.
3886: Level: intermediate
3888: Concepts: matrices^copying
3890: .seealso: MatConvert(), MatDuplicate()
3892: @*/
3893: PetscErrorCode MatCopy(Mat A,Mat B,MatStructure str)
3894: {
3896: PetscInt i;
3904: MatCheckPreallocated(B,2);
3905: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3906: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3907: if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim (%D,%D) (%D,%D)",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
3908: MatCheckPreallocated(A,1);
3909: if (A == B) return(0);
3911: PetscLogEventBegin(MAT_Copy,A,B,0,0);
3912: if (A->ops->copy) {
3913: (*A->ops->copy)(A,B,str);
3914: } else { /* generic conversion */
3915: MatCopy_Basic(A,B,str);
3916: }
3918: B->stencil.dim = A->stencil.dim;
3919: B->stencil.noc = A->stencil.noc;
3920: for (i=0; i<=A->stencil.dim; i++) {
3921: B->stencil.dims[i] = A->stencil.dims[i];
3922: B->stencil.starts[i] = A->stencil.starts[i];
3923: }
3925: PetscLogEventEnd(MAT_Copy,A,B,0,0);
3926: PetscObjectStateIncrease((PetscObject)B);
3927: return(0);
3928: }
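/*
   Example of usage: copying into a matrix that already has the same nonzero structure.
   A minimal sketch, assuming an assembled matrix A.

.vb
      Mat C;

      ierr = MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,&C);CHKERRQ(ierr);  // same structure, zero entries
      ierr = MatCopy(A,C,SAME_NONZERO_PATTERN);CHKERRQ(ierr);          // C now holds the entries of A
.ve
*/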
3930: /*@C
3931: MatConvert - Converts a matrix to another matrix, either of the same
3932: or different type.
3934: Collective on Mat
3936: Input Parameters:
3937: + mat - the matrix
3938: . newtype - new matrix type. Use MATSAME to create a new matrix of the
3939: same type as the original matrix.
3940: - reuse - denotes if the destination matrix is to be created or reused.
3941: Use MAT_INPLACE_MATRIX for in-place conversion (that is, when you want the input mat itself changed to contain the matrix in the new format); otherwise use
3942: MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX (the latter can only be used after a first call with MAT_INITIAL_MATRIX and causes the matrix space in M to be reused).
3944: Output Parameter:
3945: . M - pointer to place new matrix
3947: Notes:
3948: MatConvert() first creates a new matrix and then copies the data from
3949: the first matrix. A related routine is MatCopy(), which copies the matrix
3950: entries of one matrix to another already existing matrix context.
3952: Cannot be used to convert a sequential matrix to parallel or parallel to sequential,
3953: the MPI communicator of the generated matrix is always the same as the communicator
3954: of the input matrix.
3956: Level: intermediate
3958: Concepts: matrices^converting between storage formats
3960: .seealso: MatCopy(), MatDuplicate()
3961: @*/
3962: PetscErrorCode MatConvert(Mat mat, MatType newtype,MatReuse reuse,Mat *M)
3963: {
3965: PetscBool sametype,issame,flg;
3966: char convname[256],mtype[256];
3967: Mat B;
3973: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3974: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3975: MatCheckPreallocated(mat,1);
3976: MatSetOption(mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);
3978: PetscOptionsGetString(((PetscObject)mat)->options,((PetscObject)mat)->prefix,"-matconvert_type",mtype,256,&flg);
3979: if (flg) {
3980: newtype = mtype;
3981: }
3982: PetscObjectTypeCompare((PetscObject)mat,newtype,&sametype);
3983: PetscStrcmp(newtype,"same",&issame);
3984: if ((reuse == MAT_INPLACE_MATRIX) && (mat != *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_INPLACE_MATRIX requires same input and output matrix");
3985: if ((reuse == MAT_REUSE_MATRIX) && (mat == *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_REUSE_MATRIX means reuse matrix in final argument, perhaps you mean MAT_INPLACE_MATRIX");
3987: if ((reuse == MAT_INPLACE_MATRIX) && (issame || sametype)) return(0);
3989: if ((sametype || issame) && (reuse==MAT_INITIAL_MATRIX) && mat->ops->duplicate) {
3990: (*mat->ops->duplicate)(mat,MAT_COPY_VALUES,M);
3991: } else {
3992: PetscErrorCode (*conv)(Mat, MatType,MatReuse,Mat*)=NULL;
3993: const char *prefix[3] = {"seq","mpi",""};
3994: PetscInt i;
3995: /*
3996: Order of precedence:
3997: 1) See if a specialized converter is known to the current matrix.
3998: 2) See if a specialized converter is known to the desired matrix class.
3999: 3) See if a good general converter is registered for the desired class
4000: (as of 6/27/03 only MATMPIADJ falls into this category).
4001: 4) See if a good general converter is known for the current matrix.
4002: 5) Use a really basic converter.
4003: */
4005: /* 1) See if a specialized converter is known to the current matrix and the desired class */
4006: for (i=0; i<3; i++) {
4007: PetscStrncpy(convname,"MatConvert_",sizeof(convname));
4008: PetscStrlcat(convname,((PetscObject)mat)->type_name,sizeof(convname));
4009: PetscStrlcat(convname,"_",sizeof(convname));
4010: PetscStrlcat(convname,prefix[i],sizeof(convname));
4011: PetscStrlcat(convname,issame ? ((PetscObject)mat)->type_name : newtype,sizeof(convname));
4012: PetscStrlcat(convname,"_C",sizeof(convname));
4013: PetscObjectQueryFunction((PetscObject)mat,convname,&conv);
4014: if (conv) goto foundconv;
4015: }
4017: /* 2) See if a specialized converter is known to the desired matrix class. */
4018: MatCreate(PetscObjectComm((PetscObject)mat),&B);
4019: MatSetSizes(B,mat->rmap->n,mat->cmap->n,mat->rmap->N,mat->cmap->N);
4020: MatSetType(B,newtype);
4021: for (i=0; i<3; i++) {
4022: PetscStrncpy(convname,"MatConvert_",sizeof(convname));
4023: PetscStrlcat(convname,((PetscObject)mat)->type_name,sizeof(convname));
4024: PetscStrlcat(convname,"_",sizeof(convname));
4025: PetscStrlcat(convname,prefix[i],sizeof(convname));
4026: PetscStrlcat(convname,newtype,sizeof(convname));
4027: PetscStrlcat(convname,"_C",sizeof(convname));
4028: PetscObjectQueryFunction((PetscObject)B,convname,&conv);
4029: if (conv) {
4030: MatDestroy(&B);
4031: goto foundconv;
4032: }
4033: }
4035: /* 3) See if a good general converter is registered for the desired class */
4036: conv = B->ops->convertfrom;
4037: MatDestroy(&B);
4038: if (conv) goto foundconv;
4040: /* 4) See if a good general converter is known for the current matrix */
4041: if (mat->ops->convert) {
4042: conv = mat->ops->convert;
4043: }
4044: if (conv) goto foundconv;
4046: /* 5) Use a really basic converter. */
4047: conv = MatConvert_Basic;
4049: foundconv:
4050: PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4051: (*conv)(mat,newtype,reuse,M);
4052: if (mat->rmap->mapping && mat->cmap->mapping && !(*M)->rmap->mapping && !(*M)->cmap->mapping) {
4053: /* the block sizes must be same if the mappings are copied over */
4054: (*M)->rmap->bs = mat->rmap->bs;
4055: (*M)->cmap->bs = mat->cmap->bs;
4056: PetscObjectReference((PetscObject)mat->rmap->mapping);
4057: PetscObjectReference((PetscObject)mat->cmap->mapping);
4058: (*M)->rmap->mapping = mat->rmap->mapping;
4059: (*M)->cmap->mapping = mat->cmap->mapping;
4060: }
4061: (*M)->stencil.dim = mat->stencil.dim;
4062: (*M)->stencil.noc = mat->stencil.noc;
4063: for (i=0; i<=mat->stencil.dim; i++) {
4064: (*M)->stencil.dims[i] = mat->stencil.dims[i];
4065: (*M)->stencil.starts[i] = mat->stencil.starts[i];
4066: }
4067: PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4068: }
4069: PetscObjectStateIncrease((PetscObject)*M);
4071: /* Copy Mat options */
4072: if (mat->symmetric) {MatSetOption(*M,MAT_SYMMETRIC,PETSC_TRUE);}
4073: if (mat->hermitian) {MatSetOption(*M,MAT_HERMITIAN,PETSC_TRUE);}
4074: return(0);
4075: }
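/*
   Example of usage: the three MatReuse modes of MatConvert(). A minimal sketch, assuming
   an assembled AIJ matrix A whose entries (but not sizes) may change between calls.

.vb
      Mat Adense;

      ierr = MatConvert(A,MATDENSE,MAT_INITIAL_MATRIX,&Adense);CHKERRQ(ierr);
      // ... the entries of A change ...
      ierr = MatConvert(A,MATDENSE,MAT_REUSE_MATRIX,&Adense);CHKERRQ(ierr);   // reuse Adense
      // or convert A itself to another format in place
      ierr = MatConvert(A,MATBAIJ,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);
.ve
*/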
4077: /*@C
4078: MatFactorGetSolverType - Returns the name of the package providing the factorization routines
4080: Not Collective
4082: Input Parameter:
4083: . mat - the matrix, must be a factored matrix
4085: Output Parameter:
4086: . type - the string name of the package (do not free this string)
4088: Notes:
4089: In Fortran you pass in an empty string and the package name will be copied into it.
4090: (Make sure the string is long enough)
4092: Level: intermediate
4094: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatGetFactor()
4095: @*/
4096: PetscErrorCode MatFactorGetSolverType(Mat mat, MatSolverType *type)
4097: {
4098: PetscErrorCode ierr, (*conv)(Mat,MatSolverType*);
4103: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
4104: PetscObjectQueryFunction((PetscObject)mat,"MatFactorGetSolverType_C",&conv);
4105: if (!conv) {
4106: *type = MATSOLVERPETSC;
4107: } else {
4108: (*conv)(mat,type);
4109: }
4110: return(0);
4111: }
4113: typedef struct _MatSolverTypeForSpecifcType* MatSolverTypeForSpecifcType;
4114: struct _MatSolverTypeForSpecifcType {
4115: MatType mtype;
4116: PetscErrorCode (*getfactor[4])(Mat,MatFactorType,Mat*);
4117: MatSolverTypeForSpecifcType next;
4118: };
4120: typedef struct _MatSolverTypeHolder* MatSolverTypeHolder;
4121: struct _MatSolverTypeHolder {
4122: char *name;
4123: MatSolverTypeForSpecifcType handlers;
4124: MatSolverTypeHolder next;
4125: };
4127: static MatSolverTypeHolder MatSolverTypeHolders = NULL;
4129: /*@C
4130: MatSolverTypeRegister - Registers a MatSolverType that works for a particular matrix type
4132: Input Parameters:
4133: + package - name of the package, for example petsc or superlu
4134: . mtype - the matrix type that works with this package
4135: . ftype - the type of factorization supported by the package
4136: - getfactor - routine that will create the factored matrix ready to be used
4138: Level: intermediate
4140: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4141: @*/
4142: PetscErrorCode MatSolverTypeRegister(MatSolverType package,const MatType mtype,MatFactorType ftype,PetscErrorCode (*getfactor)(Mat,MatFactorType,Mat*))
4143: {
4144: PetscErrorCode ierr;
4145: MatSolverTypeHolder next = MatSolverTypeHolders,prev;
4146: PetscBool flg;
4147: MatSolverTypeForSpecifcType inext,iprev = NULL;
4150: if (!next) {
4151: PetscNew(&MatSolverTypeHolders);
4152: PetscStrallocpy(package,&MatSolverTypeHolders->name);
4153: PetscNew(&MatSolverTypeHolders->handlers);
4154: PetscStrallocpy(mtype,(char **)&MatSolverTypeHolders->handlers->mtype);
4155: MatSolverTypeHolders->handlers->getfactor[(int)ftype-1] = getfactor;
4156: return(0);
4157: }
4158: while (next) {
4159: PetscStrcasecmp(package,next->name,&flg);
4160: if (flg) {
4161: if (!next->handlers) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MatSolverTypeHolder is missing handlers");
4162: inext = next->handlers;
4163: while (inext) {
4164: PetscStrcasecmp(mtype,inext->mtype,&flg);
4165: if (flg) {
4166: inext->getfactor[(int)ftype-1] = getfactor;
4167: return(0);
4168: }
4169: iprev = inext;
4170: inext = inext->next;
4171: }
4172: PetscNew(&iprev->next);
4173: PetscStrallocpy(mtype,(char **)&iprev->next->mtype);
4174: iprev->next->getfactor[(int)ftype-1] = getfactor;
4175: return(0);
4176: }
4177: prev = next;
4178: next = next->next;
4179: }
4180: PetscNew(&prev->next);
4181: PetscStrallocpy(package,&prev->next->name);
4182: PetscNew(&prev->next->handlers);
4183: PetscStrallocpy(mtype,(char **)&prev->next->handlers->mtype);
4184: prev->next->handlers->getfactor[(int)ftype-1] = getfactor;
4185: return(0);
4186: }
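/*
   Example of usage: registering a factorization provided by an external package. A
   minimal sketch; the package name "mypackage" and the routine MyGetFactor() are
   hypothetical placeholders with the required signature.

.vb
      extern PetscErrorCode MyGetFactor(Mat,MatFactorType,Mat*);   // hypothetical, supplied by the package

      ierr = MatSolverTypeRegister("mypackage",MATSEQAIJ,MAT_FACTOR_LU,MyGetFactor);CHKERRQ(ierr);
      // users can then select it with MatGetFactor(A,"mypackage",MAT_FACTOR_LU,&F)
.ve
*/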
4188: /*@C
4189: MatSolverTypeGet - Gets the function that creates the factored matrix, if it exists
4191: Input Parameters:
4192: + package - name of the package, for example petsc or superlu
4193: . ftype - the type of factorization supported by the package
4194: - mtype - the matrix type that works with this package
4196: Output Parameters:
4197: + foundpackage - PETSC_TRUE if the package was registered
4198: . foundmtype - PETSC_TRUE if the package supports the requested mtype
4199: - getfactor - routine that will create the factored matrix ready to be used or NULL if not found
4201: Level: intermediate
4203: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4204: @*/
4205: PetscErrorCode MatSolverTypeGet(MatSolverType package,const MatType mtype,MatFactorType ftype,PetscBool *foundpackage,PetscBool *foundmtype,PetscErrorCode (**getfactor)(Mat,MatFactorType,Mat*))
4206: {
4207: PetscErrorCode ierr;
4208: MatSolverTypeHolder next = MatSolverTypeHolders;
4209: PetscBool flg;
4210: MatSolverTypeForSpecifcType inext;
4213: if (foundpackage) *foundpackage = PETSC_FALSE;
4214: if (foundmtype) *foundmtype = PETSC_FALSE;
4215: if (getfactor) *getfactor = NULL;
4217: if (package) {
4218: while (next) {
4219: PetscStrcasecmp(package,next->name,&flg);
4220: if (flg) {
4221: if (foundpackage) *foundpackage = PETSC_TRUE;
4222: inext = next->handlers;
4223: while (inext) {
4224: PetscStrbeginswith(mtype,inext->mtype,&flg);
4225: if (flg) {
4226: if (foundmtype) *foundmtype = PETSC_TRUE;
4227: if (getfactor) *getfactor = inext->getfactor[(int)ftype-1];
4228: return(0);
4229: }
4230: inext = inext->next;
4231: }
4232: }
4233: next = next->next;
4234: }
4235: } else {
4236: while (next) {
4237: inext = next->handlers;
4238: while (inext) {
4239: PetscStrbeginswith(mtype,inext->mtype,&flg);
4240: if (flg && inext->getfactor[(int)ftype-1]) {
4241: if (foundpackage) *foundpackage = PETSC_TRUE;
4242: if (foundmtype) *foundmtype = PETSC_TRUE;
4243: if (getfactor) *getfactor = inext->getfactor[(int)ftype-1];
4244: return(0);
4245: }
4246: inext = inext->next;
4247: }
4248: next = next->next;
4249: }
4250: }
4251: return(0);
4252: }
4254: PetscErrorCode MatSolverTypeDestroy(void)
4255: {
4256: PetscErrorCode ierr;
4257: MatSolverTypeHolder next = MatSolverTypeHolders,prev;
4258: MatSolverTypeForSpecifcType inext,iprev;
4261: while (next) {
4262: PetscFree(next->name);
4263: inext = next->handlers;
4264: while (inext) {
4265: PetscFree(inext->mtype);
4266: iprev = inext;
4267: inext = inext->next;
4268: PetscFree(iprev);
4269: }
4270: prev = next;
4271: next = next->next;
4272: PetscFree(prev);
4273: }
4274: MatSolverTypeHolders = NULL;
4275: return(0);
4276: }
4278: /*@C
4279: MatGetFactor - Returns a matrix suitable for calls to MatXXFactorSymbolic()
4281: Collective on Mat
4283: Input Parameters:
4284: + mat - the matrix
4285: . type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4286: - ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, or MAT_FACTOR_ILU
4288: Output Parameters:
4289: . f - the factor matrix used with MatXXFactorSymbolic() calls
4291: Notes:
4292: Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4293: such as pastix, superlu, mumps etc.
4295: PETSc must have been configured (./configure) to use the external solver, using the option --download-<package>
4297: Level: intermediate
4299: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4300: @*/
4301: PetscErrorCode MatGetFactor(Mat mat, MatSolverType type,MatFactorType ftype,Mat *f)
4302: {
4303: PetscErrorCode ierr,(*conv)(Mat,MatFactorType,Mat*);
4304: PetscBool foundpackage,foundmtype;
4310: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4311: MatCheckPreallocated(mat,1);
4313: MatSolverTypeGet(type,((PetscObject)mat)->type_name,ftype,&foundpackage,&foundmtype,&conv);
4314: if (!foundpackage) {
4315: if (type) {
4316: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate solver package %s. Perhaps you must ./configure with --download-%s",type,type);
4317: } else {
4318: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate a solver package. Perhaps you must ./configure with --download-<package>");
4319: }
4320: }
4322: if (!foundmtype) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverType %s does not support matrix type %s",type,((PetscObject)mat)->type_name);
4323: if (!conv) SETERRQ3(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverType %s does not support factorization type %s for matrix type %s",type,MatFactorTypes[ftype],((PetscObject)mat)->type_name);
4325: #if defined(PETSC_USE_COMPLEX)
4326: if (mat->hermitian && !mat->symmetric && (ftype == MAT_FACTOR_CHOLESKY||ftype == MAT_FACTOR_ICC)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Hermitian CHOLESKY or ICC Factor is not supported");
4327: #endif
4329: (*conv)(mat,ftype,f);
4330: return(0);
4331: }
4333: /*@C
4334: MatGetFactorAvailable - Returns a flag indicating whether the matrix supports a particular solver package and factor type
4336: Not Collective
4338: Input Parameters:
4339: + mat - the matrix
4340: . type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4341: - ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, or MAT_FACTOR_ILU
4343: Output Parameter:
4344: . flg - PETSC_TRUE if the factorization is available
4346: Notes:
4347: Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4348: such as pastix, superlu, mumps etc.
4350: PETSc must have been configured (./configure) to use the external solver, using the option --download-<package>
4352: Level: intermediate
4354: .seealso: MatCopy(), MatDuplicate(), MatGetFactor()
4355: @*/
4356: PetscErrorCode MatGetFactorAvailable(Mat mat, MatSolverType type,MatFactorType ftype,PetscBool *flg)
4357: {
4358: PetscErrorCode ierr, (*gconv)(Mat,MatFactorType,Mat*);
4364: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4365: MatCheckPreallocated(mat,1);
4367: *flg = PETSC_FALSE;
4368: MatSolverTypeGet(type,((PetscObject)mat)->type_name,ftype,NULL,NULL,&gconv);
4369: if (gconv) {
4370: *flg = PETSC_TRUE;
4371: }
4372: return(0);
4373: }
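/*
   Example of usage: checking for an external solver before requesting a factor. A
   minimal sketch, assuming a sequential AIJ matrix A; whether SuperLU is available
   depends on how PETSc was configured.

.vb
      PetscBool flg;
      Mat       F;

      ierr = MatGetFactorAvailable(A,MATSOLVERSUPERLU,MAT_FACTOR_LU,&flg);CHKERRQ(ierr);
      if (flg) {
        ierr = MatGetFactor(A,MATSOLVERSUPERLU,MAT_FACTOR_LU,&F);CHKERRQ(ierr);
      } else {
        ierr = MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);CHKERRQ(ierr);
      }
.ve
*/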
4375: #include <petscdmtypes.h>
4377: /*@
4378: MatDuplicate - Duplicates a matrix including the non-zero structure.
4380: Collective on Mat
4382: Input Parameters:
4383: + mat - the matrix
4384: - op - One of MAT_DO_NOT_COPY_VALUES, MAT_COPY_VALUES, or MAT_SHARE_NONZERO_PATTERN.
4385: See the manual page for MatDuplicateOption for an explanation of these options.
4387: Output Parameter:
4388: . M - pointer to place new matrix
4390: Level: intermediate
4392: Concepts: matrices^duplicating
4394: Notes: You cannot change the nonzero pattern for the parent or child matrix if you use MAT_SHARE_NONZERO_PATTERN.
4396: .seealso: MatCopy(), MatConvert(), MatDuplicateOption
4397: @*/
4398: PetscErrorCode MatDuplicate(Mat mat,MatDuplicateOption op,Mat *M)
4399: {
4401: Mat B;
4402: PetscInt i;
4403: DM dm;
4409: if (op == MAT_COPY_VALUES && !mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"MAT_COPY_VALUES not allowed for unassembled matrix");
4410: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4411: MatCheckPreallocated(mat,1);
4413: *M = 0;
4414: if (!mat->ops->duplicate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not written for this matrix type");
4415: PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4416: (*mat->ops->duplicate)(mat,op,M);
4417: B = *M;
4419: B->stencil.dim = mat->stencil.dim;
4420: B->stencil.noc = mat->stencil.noc;
4421: for (i=0; i<=mat->stencil.dim; i++) {
4422: B->stencil.dims[i] = mat->stencil.dims[i];
4423: B->stencil.starts[i] = mat->stencil.starts[i];
4424: }
4426: B->nooffproczerorows = mat->nooffproczerorows;
4427: B->nooffprocentries = mat->nooffprocentries;
4429: PetscObjectQuery((PetscObject) mat, "__PETSc_dm", (PetscObject*) &dm);
4430: if (dm) {
4431: PetscObjectCompose((PetscObject) B, "__PETSc_dm", (PetscObject) dm);
4432: }
4433: PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4434: PetscObjectStateIncrease((PetscObject)B);
4435: return(0);
4436: }
4438: /*@
4439: MatGetDiagonal - Gets the diagonal of a matrix.
4441: Logically Collective on Mat and Vec
4443: Input Parameters:
4444: + mat - the matrix
4445: - v - the vector for storing the diagonal
4447: Output Parameter:
4448: . v - the diagonal of the matrix
4450: Level: intermediate
4452: Note:
4453: Currently only correct in parallel for square matrices.
4455: Concepts: matrices^accessing diagonals
4457: .seealso: MatGetRow(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs()
4458: @*/
4459: PetscErrorCode MatGetDiagonal(Mat mat,Vec v)
4460: {
4467: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4468: if (!mat->ops->getdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4469: MatCheckPreallocated(mat,1);
4471: (*mat->ops->getdiagonal)(mat,v);
4472: PetscObjectStateIncrease((PetscObject)v);
4473: return(0);
4474: }
4476: /*@C
4477: MatGetRowMin - Gets the minimum value (of the real part) of each
4478: row of the matrix
4480: Logically Collective on Mat and Vec
4482: Input Parameters:
4483: . mat - the matrix
4485: Output Parameter:
4486: + v - the vector for storing the minimums
4487: - idx - the indices of the column found for each row (optional)
4489: Level: intermediate
4491: Notes: The results of this call are the same as if one converted the matrix to dense format
4492: and found the minimum value in each row (i.e. the implicit zeros are counted as zeros).
4494: This code is only implemented for a couple of matrix formats.
4496: Concepts: matrices^getting row minimums
4498: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs(),
4499: MatGetRowMax()
4500: @*/
4501: PetscErrorCode MatGetRowMin(Mat mat,Vec v,PetscInt idx[])
4502: {
4509: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4510: if (!mat->ops->getrowmin) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4511: MatCheckPreallocated(mat,1);
4513: (*mat->ops->getrowmin)(mat,v,idx);
4514: PetscObjectStateIncrease((PetscObject)v);
4515: return(0);
4516: }
4518: /*@C
4519: MatGetRowMinAbs - Gets the minimum value (in absolute value) of each
4520: row of the matrix
4522: Logically Collective on Mat and Vec
4524: Input Parameters:
4525: . mat - the matrix
4527: Output Parameter:
4528: + v - the vector for storing the minimums
4529: - idx - the indices of the column found for each row (or NULL if not needed)
4531: Level: intermediate
4533: Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4534: row is 0 (the first column).
4536: This code is only implemented for a couple of matrix formats.
4538: Concepts: matrices^getting row minimums
4540: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMaxAbs(), MatGetRowMin()
4541: @*/
4542: PetscErrorCode MatGetRowMinAbs(Mat mat,Vec v,PetscInt idx[])
4543: {
4550: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4551: if (!mat->ops->getrowminabs) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4552: MatCheckPreallocated(mat,1);
4553: if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}
4555: (*mat->ops->getrowminabs)(mat,v,idx);
4556: PetscObjectStateIncrease((PetscObject)v);
4557: return(0);
4558: }
4560: /*@C
4561: MatGetRowMax - Gets the maximum value (of the real part) of each
4562: row of the matrix
4564: Logically Collective on Mat and Vec
4566: Input Parameters:
4567: . mat - the matrix
4569: Output Parameter:
4570: + v - the vector for storing the maximums
4571: - idx - the indices of the column found for each row (optional)
4573: Level: intermediate
4575: Notes: The results of this call are the same as if one converted the matrix to dense format
4576: and found the maximum value in each row (i.e. the implicit zeros are counted as zeros).
4578: This code is only implemented for a couple of matrix formats.
4580: Concepts: matrices^getting row maximums
4582: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs(), MatGetRowMin()
4583: @*/
4584: PetscErrorCode MatGetRowMax(Mat mat,Vec v,PetscInt idx[])
4585: {
4592: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4593: if (!mat->ops->getrowmax) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4594: MatCheckPreallocated(mat,1);
4596: (*mat->ops->getrowmax)(mat,v,idx);
4597: PetscObjectStateIncrease((PetscObject)v);
4598: return(0);
4599: }
4601: /*@C
4602: MatGetRowMaxAbs - Gets the maximum value (in absolute value) of each
4603: row of the matrix
4605: Logically Collective on Mat and Vec
4607: Input Parameters:
4608: . mat - the matrix
4610: Output Parameter:
4611: + v - the vector for storing the maximums
4612: - idx - the indices of the column found for each row (or NULL if not needed)
4614: Level: intermediate
4616: Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4617: row is 0 (the first column).
4619: This code is only implemented for a couple of matrix formats.
4621: Concepts: matrices^getting row maximums
4623: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMin()
4624: @*/
4625: PetscErrorCode MatGetRowMaxAbs(Mat mat,Vec v,PetscInt idx[])
4626: {
4633: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4634: if (!mat->ops->getrowmaxabs) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4635: MatCheckPreallocated(mat,1);
4636: if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}
4638: (*mat->ops->getrowmaxabs)(mat,v,idx);
4639: PetscObjectStateIncrease((PetscObject)v);
4640: return(0);
4641: }
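/*
   Example of usage: row scaling by the largest absolute entry of each row. A minimal
   sketch, assuming an assembled AIJ matrix A with no empty rows.

.vb
      Vec rmax;

      ierr = MatCreateVecs(A,NULL,&rmax);CHKERRQ(ierr);    // vector with the row layout of A
      ierr = MatGetRowMaxAbs(A,rmax,NULL);CHKERRQ(ierr);
      ierr = VecReciprocal(rmax);CHKERRQ(ierr);
      ierr = MatDiagonalScale(A,rmax,NULL);CHKERRQ(ierr);  // scale row i by 1/max_j |a_ij|
      ierr = VecDestroy(&rmax);CHKERRQ(ierr);
.ve
*/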
4643: /*@
4644: MatGetRowSum - Gets the sum of each row of the matrix
4646: Logically or Neighborhood Collective on Mat and Vec
4648: Input Parameters:
4649: . mat - the matrix
4651: Output Parameter:
4652: . v - the vector for storing the sum of rows
4654: Level: intermediate
4656: Notes: This code is slow since it is not currently specialized for different formats
4658: Concepts: matrices^getting row sums
4660: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMin()
4661: @*/
4662: PetscErrorCode MatGetRowSum(Mat mat, Vec v)
4663: {
4664: Vec ones;
4671: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4672: MatCheckPreallocated(mat,1);
4673: MatCreateVecs(mat,&ones,NULL);
4674: VecSet(ones,1.);
4675: MatMult(mat,ones,v);
4676: VecDestroy(&ones);
4677: return(0);
4678: }
4680: /*@
4681: MatTranspose - Computes an in-place or out-of-place transpose of a matrix.
4683: Collective on Mat
4685: Input Parameter:
4686: + mat - the matrix to transpose
4687: - reuse - either MAT_INITIAL_MATRIX, MAT_REUSE_MATRIX, or MAT_INPLACE_MATRIX
4689: Output Parameters:
4690: . B - the transpose
4692: Notes:
4693: If you use MAT_INPLACE_MATRIX then you must pass in &mat for B
4695: MAT_REUSE_MATRIX causes the B matrix from a previous call to this function with MAT_INITIAL_MATRIX to be used
4697: Consider using MatCreateTranspose() instead if you only need a matrix that behaves like the transpose, but don't need the storage to be changed.
4699: Level: intermediate
4701: Concepts: matrices^transposing
4703: .seealso: MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4704: @*/
4705: PetscErrorCode MatTranspose(Mat mat,MatReuse reuse,Mat *B)
4706: {
4712: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4713: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4714: if (!mat->ops->transpose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4715: if (reuse == MAT_INPLACE_MATRIX && mat != *B) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_INPLACE_MATRIX requires last matrix to match first");
4716: if (reuse == MAT_REUSE_MATRIX && mat == *B) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Perhaps you mean MAT_INPLACE_MATRIX");
4717: MatCheckPreallocated(mat,1);
4719: PetscLogEventBegin(MAT_Transpose,mat,0,0,0);
4720: (*mat->ops->transpose)(mat,reuse,B);
4721: PetscLogEventEnd(MAT_Transpose,mat,0,0,0);
4722: if (B) {PetscObjectStateIncrease((PetscObject)*B);}
4723: return(0);
4724: }
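/*
   Example of usage: the three MatReuse modes of MatTranspose(). A minimal sketch,
   assuming an assembled matrix A whose entries (but not nonzero pattern) may change
   between calls.

.vb
      Mat At;

      ierr = MatTranspose(A,MAT_INITIAL_MATRIX,&At);CHKERRQ(ierr);
      // ... the entries of A change ...
      ierr = MatTranspose(A,MAT_REUSE_MATRIX,&At);CHKERRQ(ierr);   // refill the existing At
      // or replace A by its transpose in place
      ierr = MatTranspose(A,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);
.ve
*/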
4726: /*@
4727: MatIsTranspose - Tests whether a matrix is another one's transpose,
4728: or its own, in which case it tests symmetry.
4730: Collective on Mat
4732: Input Parameter:
4733: + A - the matrix to test
4734: - B - the matrix to test against, this can equal the first parameter
4736: Output Parameters:
4737: . flg - the result
4739: Notes:
4740: Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4741: has a running time of the order of the number of nonzeros; the parallel
4742: test involves parallel copies of the block-offdiagonal parts of the matrix.
4744: Level: intermediate
4746: Concepts: matrices^transposing, matrix^symmetry
4748: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian()
4749: @*/
4750: PetscErrorCode MatIsTranspose(Mat A,Mat B,PetscReal tol,PetscBool *flg)
4751: {
4752: PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);
4758: PetscObjectQueryFunction((PetscObject)A,"MatIsTranspose_C",&f);
4759: PetscObjectQueryFunction((PetscObject)B,"MatIsTranspose_C",&g);
4760: *flg = PETSC_FALSE;
4761: if (f && g) {
4762: if (f == g) {
4763: (*f)(A,B,tol,flg);
4764: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for symmetry test");
4765: } else {
4766: MatType mattype;
4767: if (!f) {
4768: MatGetType(A,&mattype);
4769: } else {
4770: MatGetType(B,&mattype);
4771: }
4772: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for transpose",mattype);
4773: }
4774: return(0);
4775: }
4777: /*@
4778: MatHermitianTranspose - Computes the in-place or out-of-place Hermitian (conjugate) transpose of a matrix.
4780: Collective on Mat
4782: Input Parameter:
4783: + mat - the matrix to transpose and complex conjugate
4784: - reuse - MAT_INITIAL_MATRIX to create a new matrix, MAT_INPLACE_MATRIX to reuse the first argument to store the transpose
4786: Output Parameters:
4787: . B - the Hermitian
4789: Level: intermediate
4791: Concepts: matrices^transposing, complex conjugate
4793: .seealso: MatTranspose(), MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4794: @*/
4795: PetscErrorCode MatHermitianTranspose(Mat mat,MatReuse reuse,Mat *B)
4796: {
4800: MatTranspose(mat,reuse,B);
4801: #if defined(PETSC_USE_COMPLEX)
4802: MatConjugate(*B);
4803: #endif
4804: return(0);
4805: }
4807: /*@
4808: MatIsHermitianTranspose - Tests whether a matrix is another one's Hermitian transpose.
4810: Collective on Mat
4812: Input Parameter:
4813: + A - the matrix to test
4814: - B - the matrix to test against, this can equal the first parameter
4816: Output Parameters:
4817: . flg - the result
4819: Notes:
4820: Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4821: has a running time of the order of the number of nonzeros; the parallel
4822: test involves parallel copies of the block-offdiagonal parts of the matrix.
4824: Level: intermediate
4826: Concepts: matrices^transposing, matrix^symmetry
4828: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian(), MatIsTranspose()
4829: @*/
4830: PetscErrorCode MatIsHermitianTranspose(Mat A,Mat B,PetscReal tol,PetscBool *flg)
4831: {
4832: PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);
4838: PetscObjectQueryFunction((PetscObject)A,"MatIsHermitianTranspose_C",&f);
4839: PetscObjectQueryFunction((PetscObject)B,"MatIsHermitianTranspose_C",&g);
4840: if (f && g) {
4841: if (f==g) {
4842: (*f)(A,B,tol,flg);
4843: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for Hermitian test");
4844: }
4845: return(0);
4846: }
4848: /*@
4849: MatPermute - Creates a new matrix with rows and columns permuted from the
4850: original.
4852: Collective on Mat
4854: Input Parameters:
4855: + mat - the matrix to permute
4856: . row - row permutation, each processor supplies only the permutation for its rows
4857: - col - column permutation, each processor supplies only the permutation for its columns
4859: Output Parameters:
4860: . B - the permuted matrix
4862: Level: advanced
4864: Note:
4865: The index sets map from row/col of permuted matrix to row/col of original matrix.
4866: The index sets should be on the same communicator as Mat and have the same local sizes.
4868: Concepts: matrices^permuting
4870: .seealso: MatGetOrdering(), ISAllGather()
4872: @*/
4873: PetscErrorCode MatPermute(Mat mat,IS row,IS col,Mat *B)
4874: {
4883: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4884: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4885: if (!mat->ops->permute) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatPermute not available for Mat type %s",((PetscObject)mat)->type_name);
4886: MatCheckPreallocated(mat,1);
4888: (*mat->ops->permute)(mat,row,col,B);
4889: PetscObjectStateIncrease((PetscObject)*B);
4890: return(0);
4891: }
4893: /*@
4894: MatEqual - Compares two matrices.
4896: Collective on Mat
4898: Input Parameters:
4899: + A - the first matrix
4900: - B - the second matrix
4902: Output Parameter:
4903: . flg - PETSC_TRUE if the matrices are equal; PETSC_FALSE otherwise.
4905: Level: intermediate
4907: Concepts: matrices^equality between
4908: @*/
4909: PetscErrorCode MatEqual(Mat A,Mat B,PetscBool *flg)
4910: {
4920: MatCheckPreallocated(B,2);
4921: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4922: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4923: if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D %D %D",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
4924: if (!A->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)A)->type_name);
4925: if (!B->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)B)->type_name);
4926: if (A->ops->equal != B->ops->equal) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"A is type: %s\nB is type: %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
4927: MatCheckPreallocated(A,1);
4929: (*A->ops->equal)(A,B,flg);
4930: return(0);
4931: }
4933: /*@C
4934: MatDiagonalScale - Scales a matrix on the left and right by diagonal
4935: matrices that are stored as vectors. Either of the two scaling
4936: matrices can be NULL.
4938: Collective on Mat
4940: Input Parameters:
4941: + mat - the matrix to be scaled
4942: . l - the left scaling vector (or NULL)
4943: - r - the right scaling vector (or NULL)
4945: Notes:
4946: MatDiagonalScale() computes A = LAR, where
4947: L = a diagonal matrix (stored as a vector) and R = a diagonal matrix (stored as a vector).
4948: L scales the rows of the matrix and R scales the columns of the matrix.
4950: Level: intermediate
4952: Concepts: matrices^diagonal scaling
4953: Concepts: diagonal scaling of matrices
4955: .seealso: MatScale(), MatShift(), MatDiagonalSet()
4956: @*/
4957: PetscErrorCode MatDiagonalScale(Mat mat,Vec l,Vec r)
4958: {
4964: if (!mat->ops->diagonalscale) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4967: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4968: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4969: MatCheckPreallocated(mat,1);
4971: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
4972: (*mat->ops->diagonalscale)(mat,l,r);
4973: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
4974: PetscObjectStateIncrease((PetscObject)mat);
4975: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
4976: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
4977: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
4978: }
4979: #endif
4980: return(0);
4981: }
4983: /*@
4984: MatScale - Scales all elements of a matrix by a given number.
4986: Logically Collective on Mat
4988: Input Parameters:
4989: + mat - the matrix to be scaled
4990: - a - the scaling value
4992: Output Parameter:
4993: . mat - the scaled matrix
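   Example of Usage (a minimal sketch):
.vb
     MatScale(mat,0.5);   /* every stored entry of mat is multiplied by 0.5 */
.ve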
4995: Level: intermediate
4997: Concepts: matrices^scaling all entries
4999: .seealso: MatDiagonalScale()
5000: @*/
5001: PetscErrorCode MatScale(Mat mat,PetscScalar a)
5002: {
5008: if (a != (PetscScalar)1.0 && !mat->ops->scale) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5009: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5010: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5012: MatCheckPreallocated(mat,1);
5014: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
5015: if (a != (PetscScalar)1.0) {
5016: (*mat->ops->scale)(mat,a);
5017: PetscObjectStateIncrease((PetscObject)mat);
5018: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5019: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5020: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5021: }
5022: #endif
5023: }
5024: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
5025: return(0);
5026: }
5028: /*@
5029: MatNorm - Calculates various norms of a matrix.
5031: Collective on Mat
5033: Input Parameters:
5034: + mat - the matrix
5035: - type - the type of norm, NORM_1, NORM_FROBENIUS, NORM_INFINITY
5037: Output Parameters:
5038: . nrm - the resulting norm
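   Example of Usage (a minimal sketch):
.vb
     PetscReal nrm;
     MatNorm(mat,NORM_FROBENIUS,&nrm);
     PetscPrintf(PetscObjectComm((PetscObject)mat),"Frobenius norm %g\n",(double)nrm);
.ve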
5040: Level: intermediate
5042: Concepts: matrices^norm
5043: Concepts: norm^of matrix
5044: @*/
5045: PetscErrorCode MatNorm(Mat mat,NormType type,PetscReal *nrm)
5046: {
5054: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5055: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5056: if (!mat->ops->norm) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5057: MatCheckPreallocated(mat,1);
5059: (*mat->ops->norm)(mat,type,nrm);
5060: return(0);
5061: }
5063: /*
5064: This variable is used to prevent counting of MatAssemblyBegin() that
5065: are called from within a MatAssemblyEnd().
5066: */
5067: static PetscInt MatAssemblyEnd_InUse = 0;
5068: /*@
5069: MatAssemblyBegin - Begins assembling the matrix. This routine should
5070: be called after completing all calls to MatSetValues().
5072: Collective on Mat
5074: Input Parameters:
5075: + mat - the matrix
5076: - type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY
5078: Notes:
5079: MatSetValues() generally caches the values. The matrix is ready to
5080: use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5081: Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5082: in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5083: using the matrix.
5085: ALL processes that share a matrix MUST call MatAssemblyBegin() and MatAssemblyEnd() the SAME NUMBER of times, and each time with the
5086: same flag of MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY for all processes. Thus you CANNOT locally change from ADD_VALUES to INSERT_VALUES; that is
5087: a global collective operation requiring all processes that share the matrix.
5089: Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
5090: out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5091: before MAT_FINAL_ASSEMBLY so the space is not compressed out.
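   Example of Usage (a minimal sketch; the diagonal values inserted here are illustrative):
.vb
     PetscInt    i,rstart,rend;
     PetscScalar v = 1.0;
     MatGetOwnershipRange(mat,&rstart,&rend);
     for (i=rstart; i<rend; i++) {
       MatSetValues(mat,1,&i,1,&i,&v,INSERT_VALUES);
     }
     MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
     /* unrelated work can be overlapped here while off-process values are communicated */
     MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve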
5093: Level: beginner
5095: Concepts: matrices^assembling
5097: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssembled()
5098: @*/
5099: PetscErrorCode MatAssemblyBegin(Mat mat,MatAssemblyType type)
5100: {
5106: MatCheckPreallocated(mat,1);
5107: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix.\nDid you forget to call MatSetUnfactored()?");
5108: if (mat->assembled) {
5109: mat->was_assembled = PETSC_TRUE;
5110: mat->assembled = PETSC_FALSE;
5111: }
5112: if (!MatAssemblyEnd_InUse) {
5113: PetscLogEventBegin(MAT_AssemblyBegin,mat,0,0,0);
5114: if (mat->ops->assemblybegin) {(*mat->ops->assemblybegin)(mat,type);}
5115: PetscLogEventEnd(MAT_AssemblyBegin,mat,0,0,0);
5116: } else if (mat->ops->assemblybegin) {
5117: (*mat->ops->assemblybegin)(mat,type);
5118: }
5119: return(0);
5120: }
5122: /*@
5123: MatAssembled - Indicates if a matrix has been assembled and is ready for
5124: use; for example, in matrix-vector product.
5126: Not Collective
5128: Input Parameter:
5129: . mat - the matrix
5131: Output Parameter:
5132: . assembled - PETSC_TRUE or PETSC_FALSE
5134: Level: advanced
5136: Concepts: matrices^assembled?
5138: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssemblyBegin()
5139: @*/
5140: PetscErrorCode MatAssembled(Mat mat,PetscBool *assembled)
5141: {
5146: *assembled = mat->assembled;
5147: return(0);
5148: }
5150: /*@
5151: MatAssemblyEnd - Completes assembling the matrix. This routine should
5152: be called after MatAssemblyBegin().
5154: Collective on Mat
5156: Input Parameters:
5157: + mat - the matrix
5158: - type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY
5160: Options Database Keys:
5161: + -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
5162: . -mat_view ::ascii_info_detail - Prints more detailed info
5163: . -mat_view - Prints matrix in ASCII format
5164: . -mat_view ::ascii_matlab - Prints matrix in Matlab format
5165: . -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
5166: . -display <name> - Sets display name (default is host)
5167: . -draw_pause <sec> - Sets number of seconds to pause after display
5168: . -mat_view socket - Sends matrix to socket, can be accessed from Matlab (See Users-Manual: Chapter 12 Using MATLAB with PETSc )
5169: . -viewer_socket_machine <machine> - Machine to use for socket
5170: . -viewer_socket_port <port> - Port number to use for socket
5171: - -mat_view binary:filename[:append] - Save matrix to file in binary format
5173: Notes:
5174: MatSetValues() generally caches the values. The matrix is ready to
5175: use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5176: Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5177: in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5178: using the matrix.
5180: Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
5181: out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5182: before MAT_FINAL_ASSEMBLY so the space is not compressed out.
5184: Level: beginner
5186: .seealso: MatAssemblyBegin(), MatSetValues(), PetscDrawOpenX(), PetscDrawCreate(), MatView(), MatAssembled(), PetscViewerSocketOpen()
5187: @*/
5188: PetscErrorCode MatAssemblyEnd(Mat mat,MatAssemblyType type)
5189: {
5190: PetscErrorCode ierr;
5191: static PetscInt inassm = 0;
5192: PetscBool flg = PETSC_FALSE;
5198: inassm++;
5199: MatAssemblyEnd_InUse++;
5200: if (MatAssemblyEnd_InUse == 1) { /* Do the logging only the first time through */
5201: PetscLogEventBegin(MAT_AssemblyEnd,mat,0,0,0);
5202: if (mat->ops->assemblyend) {
5203: (*mat->ops->assemblyend)(mat,type);
5204: }
5205: PetscLogEventEnd(MAT_AssemblyEnd,mat,0,0,0);
5206: } else if (mat->ops->assemblyend) {
5207: (*mat->ops->assemblyend)(mat,type);
5208: }
5210: /* Flush assembly is not a true assembly */
5211: if (type != MAT_FLUSH_ASSEMBLY) {
5212: mat->assembled = PETSC_TRUE; mat->num_ass++;
5213: }
5214: mat->insertmode = NOT_SET_VALUES;
5215: MatAssemblyEnd_InUse--;
5216: PetscObjectStateIncrease((PetscObject)mat);
5217: if (!mat->symmetric_eternal) {
5218: mat->symmetric_set = PETSC_FALSE;
5219: mat->hermitian_set = PETSC_FALSE;
5220: mat->structurally_symmetric_set = PETSC_FALSE;
5221: }
5222: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5223: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5224: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5225: }
5226: #endif
5227: if (inassm == 1 && type != MAT_FLUSH_ASSEMBLY) {
5228: MatViewFromOptions(mat,NULL,"-mat_view");
5230: if (mat->checksymmetryonassembly) {
5231: MatIsSymmetric(mat,mat->checksymmetrytol,&flg);
5232: if (flg) {
5233: PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5234: } else {
5235: PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is not symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5236: }
5237: }
5238: if (mat->nullsp && mat->checknullspaceonassembly) {
5239: MatNullSpaceTest(mat->nullsp,mat,NULL);
5240: }
5241: }
5242: inassm--;
5243: return(0);
5244: }
5246: /*@
5247: MatSetOption - Sets a parameter option for a matrix. Some options
5248: may be specific to certain storage formats. Some options
5249: determine how values will be inserted (or added). Sorted,
5250: row-oriented input will generally assemble the fastest. The default
5251: is row-oriented.
5253: Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption
5255: Input Parameters:
5256: + mat - the matrix
5257: . option - the option, one of those listed below (and possibly others),
5258: - flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)
5260: Options Describing Matrix Structure:
5261: + MAT_SPD - symmetric positive definite
5262: . MAT_SYMMETRIC - symmetric in terms of both structure and value
5263: . MAT_HERMITIAN - the transpose is the complex conjugate (i.e. the matrix is Hermitian)
5264: . MAT_STRUCTURALLY_SYMMETRIC - symmetric nonzero structure
5265: - MAT_SYMMETRY_ETERNAL - if you would like the symmetry/Hermitian flag
5266: you set to be kept with all future use of the matrix
5267: including after MatAssemblyBegin/End() which could
5268: potentially change the symmetry structure, i.e. you
5269: KNOW the matrix will ALWAYS have the property you set.
5272: Options For Use with MatSetValues():
5273: Insert a logically dense subblock, which can be
5274: . MAT_ROW_ORIENTED - row-oriented (default)
5276: Note these options reflect the data you pass in with MatSetValues(); it has
5277: nothing to do with how the data is stored internally in the matrix
5278: data structure.
5280: When (re)assembling a matrix, we can restrict the input for
5281: efficiency/debugging purposes. These options include:
5282: + MAT_NEW_NONZERO_LOCATIONS - additional insertions will be allowed if they generate a new nonzero (slow)
5283: . MAT_NEW_DIAGONALS - new diagonals will be allowed (for block diagonal format only)
5284: . MAT_IGNORE_OFF_PROC_ENTRIES - drops off-processor entries
5285: . MAT_NEW_NONZERO_LOCATION_ERR - generates an error for new matrix entry
5286: . MAT_USE_HASH_TABLE - uses a hash table to speed up matrix assembly
5287: . MAT_NO_OFF_PROC_ENTRIES - you know each process will only set values for its own rows, will generate an error if
5288: any process sets values for another process. This avoids all reductions in the MatAssembly routines and thus improves
5289: performance for very large process counts.
5290: - MAT_SUBSET_OFF_PROC_ENTRIES - you know that the first assembly after setting this flag will set a superset
5291: of the off-process entries required for all subsequent assemblies. This avoids a rendezvous step in the MatAssembly
5292: functions, instead sending only neighbor messages.
5294: Notes:
5295: Except for MAT_UNUSED_NONZERO_LOCATION_ERR and MAT_ROW_ORIENTED all processes that share the matrix must pass the same value in flg!
5297: Some options are relevant only for particular matrix types and
5298: are thus ignored by others. Other options are not supported by
5299: certain matrix types and will generate an error message if set.
5301: If using a Fortran 77 module to compute a matrix, one may need to
5302: use the column-oriented option (or convert to the row-oriented
5303: format).
5305: MAT_NEW_NONZERO_LOCATIONS set to PETSC_FALSE indicates that any add or insertion
5306: that would generate a new entry in the nonzero structure is instead
5307: ignored. Thus, if memory has not already been allocated for this particular
5308: data, then the insertion is ignored. For dense matrices, in which
5309: the entire array is allocated, no entries are ever ignored.
5310: Set after the first MatAssemblyEnd(). If this option is set then the MatAssemblyBegin/End() processing requires one fewer global reduction
5312: MAT_NEW_NONZERO_LOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5313: that would generate a new entry in the nonzero structure instead produces
5314: an error. (Currently supported for AIJ and BAIJ formats only.) If this option is set then the MatAssemblyBegin/End() processing requires one fewer global reduction
5316: MAT_NEW_NONZERO_ALLOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5317: that would generate a new entry that has not been preallocated will
5318: instead produce an error. (Currently supported for AIJ and BAIJ formats
5319: only.) This is a useful flag when debugging matrix memory preallocation.
5320: If this option is set then the MatAssemblyBegin/End() processing requires one fewer global reduction
5322: MAT_IGNORE_OFF_PROC_ENTRIES set to PETSC_TRUE indicates entries destined for
5323: other processors should be dropped, rather than stashed.
5324: This is useful if you know that the "owning" processor is also
5325: always generating the correct matrix entries, so that PETSc need
5326: not transfer duplicate entries generated on another processor.
5328: MAT_USE_HASH_TABLE indicates that a hash table should be used to improve the
5329: searches during matrix assembly. When this flag is set, the hash table
5330: is created during the first matrix assembly. This hash table is
5331: used the next time through, during MatSetValues()/MatSetValuesBlocked(),
5332: to improve the searching of indices. The MAT_NEW_NONZERO_LOCATIONS flag
5333: should be used with the MAT_USE_HASH_TABLE flag. This option is currently
5334: supported by the MATMPIBAIJ format only.
5336: MAT_KEEP_NONZERO_PATTERN indicates that when MatZeroRows() is called the zeroed entries
5337: are kept in the nonzero structure
5339: MAT_IGNORE_ZERO_ENTRIES - for AIJ/IS matrices this will stop zero values from creating
5340: a zero location in the matrix
5342: MAT_USE_INODES - indicates using inode version of the code - works with AIJ matrix types
5344: MAT_NO_OFF_PROC_ZERO_ROWS - you know each process will only zero its own rows. This avoids all reductions in the
5345: zero row routines and thus improves performance for very large process counts.
5347: MAT_IGNORE_LOWER_TRIANGULAR - For SBAIJ matrices will ignore any insertions you make in the lower triangular
5348: part of the matrix (since they should match the upper triangular part).
5350: Notes: Can only be called after MatSetSizes() and MatSetType() have been called.
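   Example of Usage (a minimal sketch; the second option is useful when validating preallocation):
.vb
     MatSetOption(mat,MAT_SYMMETRIC,PETSC_TRUE);
     MatSetOption(mat,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
.ve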
5352: Level: intermediate
5354: Concepts: matrices^setting options
5356: .seealso: MatOption, Mat
5358: @*/
5359: PetscErrorCode MatSetOption(Mat mat,MatOption op,PetscBool flg)
5360: {
5366: if (op > 0) {
5369: }
5371: if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Options %d is out of range",(int)op);
5372: if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot set options until type and size have been set, see MatSetType() and MatSetSizes()");
5374: switch (op) {
5375: case MAT_NO_OFF_PROC_ENTRIES:
5376: mat->nooffprocentries = flg;
5377: return(0);
5378: break;
5379: case MAT_SUBSET_OFF_PROC_ENTRIES:
5380: mat->subsetoffprocentries = flg;
5381: return(0);
5382: case MAT_NO_OFF_PROC_ZERO_ROWS:
5383: mat->nooffproczerorows = flg;
5384: return(0);
5385: break;
5386: case MAT_SPD:
5387: mat->spd_set = PETSC_TRUE;
5388: mat->spd = flg;
5389: if (flg) {
5390: mat->symmetric = PETSC_TRUE;
5391: mat->structurally_symmetric = PETSC_TRUE;
5392: mat->symmetric_set = PETSC_TRUE;
5393: mat->structurally_symmetric_set = PETSC_TRUE;
5394: }
5395: break;
5396: case MAT_SYMMETRIC:
5397: mat->symmetric = flg;
5398: if (flg) mat->structurally_symmetric = PETSC_TRUE;
5399: mat->symmetric_set = PETSC_TRUE;
5400: mat->structurally_symmetric_set = flg;
5401: #if !defined(PETSC_USE_COMPLEX)
5402: mat->hermitian = flg;
5403: mat->hermitian_set = PETSC_TRUE;
5404: #endif
5405: break;
5406: case MAT_HERMITIAN:
5407: mat->hermitian = flg;
5408: if (flg) mat->structurally_symmetric = PETSC_TRUE;
5409: mat->hermitian_set = PETSC_TRUE;
5410: mat->structurally_symmetric_set = flg;
5411: #if !defined(PETSC_USE_COMPLEX)
5412: mat->symmetric = flg;
5413: mat->symmetric_set = PETSC_TRUE;
5414: #endif
5415: break;
5416: case MAT_STRUCTURALLY_SYMMETRIC:
5417: mat->structurally_symmetric = flg;
5418: mat->structurally_symmetric_set = PETSC_TRUE;
5419: break;
5420: case MAT_SYMMETRY_ETERNAL:
5421: mat->symmetric_eternal = flg;
5422: break;
5423: case MAT_STRUCTURE_ONLY:
5424: mat->structure_only = flg;
5425: break;
5426: default:
5427: break;
5428: }
5429: if (mat->ops->setoption) {
5430: (*mat->ops->setoption)(mat,op,flg);
5431: }
5432: return(0);
5433: }
5435: /*@
5436: MatGetOption - Gets a parameter option that has been set for a matrix.
5438: Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption
5440: Input Parameters:
5441: + mat - the matrix
5442: - option - the option, this only responds to certain options, check the code for which ones
5444: Output Parameter:
5445: . flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)
5447: Notes: Can only be called after MatSetSizes() and MatSetType() have been called.
5449: Level: intermediate
5451: Concepts: matrices^setting options
5453: .seealso: MatOption, MatSetOption()
5455: @*/
5456: PetscErrorCode MatGetOption(Mat mat,MatOption op,PetscBool *flg)
5457: {
5462: if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Options %d is out of range",(int)op);
5463: if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot get options until type and size have been set, see MatSetType() and MatSetSizes()");
5465: switch (op) {
5466: case MAT_NO_OFF_PROC_ENTRIES:
5467: *flg = mat->nooffprocentries;
5468: break;
5469: case MAT_NO_OFF_PROC_ZERO_ROWS:
5470: *flg = mat->nooffproczerorows;
5471: break;
5472: case MAT_SYMMETRIC:
5473: *flg = mat->symmetric;
5474: break;
5475: case MAT_HERMITIAN:
5476: *flg = mat->hermitian;
5477: break;
5478: case MAT_STRUCTURALLY_SYMMETRIC:
5479: *flg = mat->structurally_symmetric;
5480: break;
5481: case MAT_SYMMETRY_ETERNAL:
5482: *flg = mat->symmetric_eternal;
5483: break;
5484: case MAT_SPD:
5485: *flg = mat->spd;
5486: break;
5487: default:
5488: break;
5489: }
5490: return(0);
5491: }
5493: /*@
5494: MatZeroEntries - Zeros all entries of a matrix. For sparse matrices
5495: this routine retains the old nonzero structure.
5497: Logically Collective on Mat
5499: Input Parameters:
5500: . mat - the matrix
5502: Level: intermediate
5504: Notes: If the matrix was not preallocated then a default, and likely poor, preallocation will be set in the matrix, so this should be called after the preallocation phase.
5505: See the Performance chapter of the users manual for information on preallocating matrices.
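   Example of Usage (a minimal sketch of reusing a preallocated matrix each time step; row, col and v are illustrative):
.vb
     MatZeroEntries(mat);                             /* keep the nonzero pattern, zero the values */
     MatSetValues(mat,1,&row,1,&col,&v,ADD_VALUES);   /* refill the same locations */
     MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve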
5507: Concepts: matrices^zeroing
5509: .seealso: MatZeroRows()
5510: @*/
5511: PetscErrorCode MatZeroEntries(Mat mat)
5512: {
5518: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5519: if (mat->insertmode != NOT_SET_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for matrices where you have set values but not yet assembled");
5520: if (!mat->ops->zeroentries) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5521: MatCheckPreallocated(mat,1);
5523: PetscLogEventBegin(MAT_ZeroEntries,mat,0,0,0);
5524: (*mat->ops->zeroentries)(mat);
5525: PetscLogEventEnd(MAT_ZeroEntries,mat,0,0,0);
5526: PetscObjectStateIncrease((PetscObject)mat);
5527: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5528: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5529: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5530: }
5531: #endif
5532: return(0);
5533: }
5535: /*@C
5536: MatZeroRowsColumns - Zeros all entries (except possibly the main diagonal)
5537: of a set of rows and columns of a matrix.
5539: Collective on Mat
5541: Input Parameters:
5542: + mat - the matrix
5543: . numRows - the number of rows to remove
5544: . rows - the global row indices
5545: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5546: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5547: - b - optional vector of right hand side, that will be adjusted by provided solution
5549: Notes:
5550: This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.
5552: The user can set a value in the diagonal entry (or for the AIJ and
5553: row formats can optionally remove the main diagonal entry from the
5554: nonzero structure as well, by passing 0.0 as the final argument).
5556: For the parallel case, all processes that share the matrix (i.e.,
5557: those in the communicator used for matrix creation) MUST call this
5558: routine, regardless of whether any rows being zeroed are owned by
5559: them.
5561: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5562: list only rows local to itself).
5564: The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.
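   Example of Usage (a minimal sketch; nbc and bcrows[], holding the global indices of Dirichlet boundary rows, are illustrative and x carries the boundary values):
.vb
     MatZeroRowsColumns(mat,nbc,bcrows,1.0,x,b);   /* eliminate rows/columns, keep 1.0 on the diagonal, adjust b */
.ve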
5566: Level: intermediate
5568: Concepts: matrices^zeroing rows
5570: .seealso: MatZeroRowsIS(), MatZeroRows(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5571: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5572: @*/
5573: PetscErrorCode MatZeroRowsColumns(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5574: {
5581: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5582: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5583: if (!mat->ops->zerorowscolumns) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5584: MatCheckPreallocated(mat,1);
5586: (*mat->ops->zerorowscolumns)(mat,numRows,rows,diag,x,b);
5587: MatViewFromOptions(mat,NULL,"-mat_view");
5588: PetscObjectStateIncrease((PetscObject)mat);
5589: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5590: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5591: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5592: }
5593: #endif
5594: return(0);
5595: }
5597: /*@C
5598: MatZeroRowsColumnsIS - Zeros all entries (except possibly the main diagonal)
5599: of a set of rows and columns of a matrix.
5601: Collective on Mat
5603: Input Parameters:
5604: + mat - the matrix
5605: . is - the rows to zero
5606: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5607: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5608: - b - optional vector of right hand side, that will be adjusted by provided solution
5610: Notes:
5611: This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.
5613: The user can set a value in the diagonal entry (or for the AIJ and
5614: row formats can optionally remove the main diagonal entry from the
5615: nonzero structure as well, by passing 0.0 as the final argument).
5617: For the parallel case, all processes that share the matrix (i.e.,
5618: those in the communicator used for matrix creation) MUST call this
5619: routine, regardless of whether any rows being zeroed are owned by
5620: them.
5622: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5623: list only rows local to itself).
5625: The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.
5627: Level: intermediate
5629: Concepts: matrices^zeroing rows
5631: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5632: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRows(), MatZeroRowsColumnsStencil()
5633: @*/
5634: PetscErrorCode MatZeroRowsColumnsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5635: {
5637: PetscInt numRows;
5638: const PetscInt *rows;
5645: ISGetLocalSize(is,&numRows);
5646: ISGetIndices(is,&rows);
5647: MatZeroRowsColumns(mat,numRows,rows,diag,x,b);
5648: ISRestoreIndices(is,&rows);
5649: return(0);
5650: }
5652: /*@C
5653: MatZeroRows - Zeros all entries (except possibly the main diagonal)
5654: of a set of rows of a matrix.
5656: Collective on Mat
5658: Input Parameters:
5659: + mat - the matrix
5660: . numRows - the number of rows to remove
5661: . rows - the global row indices
5662: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5663: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5664: - b - optional vector of right hand side, that will be adjusted by provided solution
5666: Notes:
5667: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5668: but does not release memory. For the dense and block diagonal
5669: formats this does not alter the nonzero structure.
5671: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5672: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5673: merely zeroed.
5675: The user can set a value in the diagonal entry (or for the AIJ and
5676: row formats can optionally remove the main diagonal entry from the
5677: nonzero structure as well, by passing 0.0 as the final argument).
5679: For the parallel case, all processes that share the matrix (i.e.,
5680: those in the communicator used for matrix creation) MUST call this
5681: routine, regardless of whether any rows being zeroed are owned by
5682: them.
5684: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5685: list only rows local to itself).
5687: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5688: owns that are to be zeroed. This saves a global synchronization in the implementation.
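   Example of Usage (a minimal sketch; nrows and rows[] are illustrative and only locally owned rows are listed, so the option shown applies):
.vb
     MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE);
     MatZeroRows(mat,nrows,rows,1.0,NULL,NULL);
.ve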
5690: Level: intermediate
5692: Concepts: matrices^zeroing rows
5694: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5695: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5696: @*/
5697: PetscErrorCode MatZeroRows(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5698: {
5705: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5706: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5707: if (!mat->ops->zerorows) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5708: MatCheckPreallocated(mat,1);
5710: (*mat->ops->zerorows)(mat,numRows,rows,diag,x,b);
5711: MatViewFromOptions(mat,NULL,"-mat_view");
5712: PetscObjectStateIncrease((PetscObject)mat);
5713: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5714: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5715: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5716: }
5717: #endif
5718: return(0);
5719: }
5721: /*@C
5722: MatZeroRowsIS - Zeros all entries (except possibly the main diagonal)
5723: of a set of rows of a matrix.
5725: Collective on Mat
5727: Input Parameters:
5728: + mat - the matrix
5729: . is - index set of rows to remove
5730: . diag - value put in all diagonals of eliminated rows
5731: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5732: - b - optional vector of right hand side, that will be adjusted by provided solution
5734: Notes:
5735: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5736: but does not release memory. For the dense and block diagonal
5737: formats this does not alter the nonzero structure.
5739: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5740: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5741: merely zeroed.
5743: The user can set a value in the diagonal entry (or for the AIJ and
5744: row formats can optionally remove the main diagonal entry from the
5745: nonzero structure as well, by passing 0.0 as the final argument).
5747: For the parallel case, all processes that share the matrix (i.e.,
5748: those in the communicator used for matrix creation) MUST call this
5749: routine, regardless of whether any rows being zeroed are owned by
5750: them.
5752: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5753: list only rows local to itself).
5755: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5756: owns that are to be zeroed. This saves a global synchronization in the implementation.
5758: Level: intermediate
5760: Concepts: matrices^zeroing rows
5762: .seealso: MatZeroRows(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5763: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5764: @*/
5765: PetscErrorCode MatZeroRowsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5766: {
5767: PetscInt numRows;
5768: const PetscInt *rows;
5775: ISGetLocalSize(is,&numRows);
5776: ISGetIndices(is,&rows);
5777: MatZeroRows(mat,numRows,rows,diag,x,b);
5778: ISRestoreIndices(is,&rows);
5779: return(0);
5780: }
5782: /*@C
5783: MatZeroRowsStencil - Zeros all entries (except possibly the main diagonal)
5784: of a set of rows of a matrix. These rows must be local to the process.
5786: Collective on Mat
5788: Input Parameters:
5789: + mat - the matrix
5790: . numRows - the number of rows to remove
5791: . rows - the grid coordinates (and component number when dof > 1) for matrix rows
5792: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5793: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5794: - b - optional vector of right hand side, that will be adjusted by provided solution
5796: Notes:
5797: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5798: but does not release memory. For the dense and block diagonal
5799: formats this does not alter the nonzero structure.
5801: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5802: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5803: merely zeroed.
5805: The user can set a value in the diagonal entry (or for the AIJ and
5806: row formats can optionally remove the main diagonal entry from the
5807: nonzero structure as well, by passing 0.0 as the final argument).
5809: For the parallel case, all processes that share the matrix (i.e.,
5810: those in the communicator used for matrix creation) MUST call this
5811: routine, regardless of whether any rows being zeroed are owned by
5812: them.
5814: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5815: list only rows local to itself).
5817: The grid coordinates are across the entire grid, not just the local portion
5819: In Fortran idxm and idxn should be declared as
5820: $ MatStencil idxm(4,m)
5821: and the values inserted using
5822: $ idxm(MatStencil_i,1) = i
5823: $ idxm(MatStencil_j,1) = j
5824: $ idxm(MatStencil_k,1) = k
5825: $ idxm(MatStencil_c,1) = c
5826: etc
5828: For periodic boundary conditions use negative indices for values to the left (below 0), which are
5829: obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
5830: etc., to obtain values wrapped around from the left edge. This works only with the
5831: DM_BOUNDARY_PERIODIC boundary type.
5833: For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
5834: a single value per point) you can skip filling those indices.
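   Example of Usage (a minimal sketch for a 2d grid; i and j are illustrative grid indices owned by this process):
.vb
     MatStencil row;
     row.i = i; row.j = j; row.k = 0; row.c = 0;
     MatZeroRowsStencil(mat,1,&row,1.0,NULL,NULL);
.ve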
5836: Level: intermediate
5838: Concepts: matrices^zeroing rows
5840: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRows(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5841: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5842: @*/
5843: PetscErrorCode MatZeroRowsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5844: {
5845: PetscInt dim = mat->stencil.dim;
5846: PetscInt sdim = dim - (1 - (PetscInt) mat->stencil.noc);
5847: PetscInt *dims = mat->stencil.dims+1;
5848: PetscInt *starts = mat->stencil.starts;
5849: PetscInt *dxm = (PetscInt*) rows;
5850: PetscInt *jdxm, i, j, tmp, numNewRows = 0;
5858: PetscMalloc1(numRows, &jdxm);
5859: for (i = 0; i < numRows; ++i) {
5860: /* Skip unused dimensions (they are ordered k, j, i, c) */
5861: for (j = 0; j < 3-sdim; ++j) dxm++;
5862: /* Local index in X dir */
5863: tmp = *dxm++ - starts[0];
5864: /* Loop over remaining dimensions */
5865: for (j = 0; j < dim-1; ++j) {
5866: /* If nonlocal, set index to be negative */
5867: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
5868: /* Update local index */
5869: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
5870: }
5871: /* Skip component slot if necessary */
5872: if (mat->stencil.noc) dxm++;
5873: /* Local row number */
5874: if (tmp >= 0) {
5875: jdxm[numNewRows++] = tmp;
5876: }
5877: }
5878: MatZeroRowsLocal(mat,numNewRows,jdxm,diag,x,b);
5879: PetscFree(jdxm);
5880: return(0);
5881: }
5883: /*@C
5884: MatZeroRowsColumnsStencil - Zeros all row and column entries (except possibly the main diagonal)
5885: of a set of rows and columns of a matrix.
5887: Collective on Mat
5889: Input Parameters:
5890: + mat - the matrix
5891: . numRows - the number of rows/columns to remove
5892: . rows - the grid coordinates (and component number when dof > 1) for matrix rows
5893: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5894: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5895: - b - optional vector of right hand side, that will be adjusted by provided solution
5897: Notes:
5898: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5899: but does not release memory. For the dense and block diagonal
5900: formats this does not alter the nonzero structure.
5902: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5903: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5904: merely zeroed.
5906: The user can set a value in the diagonal entry (or for the AIJ and
5907: row formats can optionally remove the main diagonal entry from the
5908: nonzero structure as well, by passing 0.0 as the final argument).
5910: For the parallel case, all processes that share the matrix (i.e.,
5911: those in the communicator used for matrix creation) MUST call this
5912: routine, regardless of whether any rows being zeroed are owned by
5913: them.
5915: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5916: list only rows local to itself, but the row/column numbers are given in local numbering).
5918: The grid coordinates are across the entire grid, not just the local portion
5920: In Fortran idxm and idxn should be declared as
5921: $ MatStencil idxm(4,m)
5922: and the values inserted using
5923: $ idxm(MatStencil_i,1) = i
5924: $ idxm(MatStencil_j,1) = j
5925: $ idxm(MatStencil_k,1) = k
5926: $ idxm(MatStencil_c,1) = c
5927: etc
5929: For periodic boundary conditions use negative indices for values to the left (below 0), which are
5930: obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
5931: etc., to obtain values wrapped around from the left edge. This works only with the
5932: DM_BOUNDARY_PERIODIC boundary type.
5934: For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
5935: a single value per point) you can skip filling those indices.
5937: Level: intermediate
5939: Concepts: matrices^zeroing rows
5941: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5942: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRows()
5943: @*/
5944: PetscErrorCode MatZeroRowsColumnsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5945: {
5946: PetscInt dim = mat->stencil.dim;
5947: PetscInt sdim = dim - (1 - (PetscInt) mat->stencil.noc);
5948: PetscInt *dims = mat->stencil.dims+1;
5949: PetscInt *starts = mat->stencil.starts;
5950: PetscInt *dxm = (PetscInt*) rows;
5951: PetscInt *jdxm, i, j, tmp, numNewRows = 0;
5959: PetscMalloc1(numRows, &jdxm);
5960: for (i = 0; i < numRows; ++i) {
5961: /* Skip unused dimensions (they are ordered k, j, i, c) */
5962: for (j = 0; j < 3-sdim; ++j) dxm++;
5963: /* Local index in X dir */
5964: tmp = *dxm++ - starts[0];
5965: /* Loop over remaining dimensions */
5966: for (j = 0; j < dim-1; ++j) {
5967: /* If nonlocal, set index to be negative */
5968: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
5969: /* Update local index */
5970: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
5971: }
5972: /* Skip component slot if necessary */
5973: if (mat->stencil.noc) dxm++;
5974: /* Local row number */
5975: if (tmp >= 0) {
5976: jdxm[numNewRows++] = tmp;
5977: }
5978: }
5979: MatZeroRowsColumnsLocal(mat,numNewRows,jdxm,diag,x,b);
5980: PetscFree(jdxm);
5981: return(0);
5982: }
5984: /*@C
5985: MatZeroRowsLocal - Zeros all entries (except possibly the main diagonal)
5986: of a set of rows of a matrix; using local numbering of rows.
5988: Collective on Mat
5990: Input Parameters:
5991: + mat - the matrix
5992: . numRows - the number of rows to remove
5993: . rows - the global row indices
5994: . diag - value put in all diagonals of eliminated rows
5995: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5996: - b - optional vector of right hand side, that will be adjusted by provided solution
5998: Notes:
5999: Before calling MatZeroRowsLocal(), the user must first set the
6000: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6002: For the AIJ matrix formats this removes the old nonzero structure,
6003: but does not release memory. For the dense and block diagonal
6004: formats this does not alter the nonzero structure.
6006: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
6007: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
6008: merely zeroed.
6010: The user can set a value in the diagonal entry (or for the AIJ and
6011: row formats can optionally remove the main diagonal entry from the
6012: nonzero structure as well, by passing 0.0 as the final argument).
6014: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6015: owns that are to be zeroed. This saves a global synchronization in the implementation.
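   Example of Usage (a minimal sketch; ltog is an ISLocalToGlobalMapping created earlier, e.g. from a DM, and lrows[] holds local row indices):
.vb
     MatSetLocalToGlobalMapping(mat,ltog,ltog);
     MatZeroRowsLocal(mat,nlocal,lrows,1.0,NULL,NULL);
.ve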
6017: Level: intermediate
6019: Concepts: matrices^zeroing
6021: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRows(), MatSetOption(),
6022: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6023: @*/
6024: PetscErrorCode MatZeroRowsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6025: {
6032: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6033: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6034: MatCheckPreallocated(mat,1);
6036: if (mat->ops->zerorowslocal) {
6037: (*mat->ops->zerorowslocal)(mat,numRows,rows,diag,x,b);
6038: } else {
6039: IS is, newis;
6040: const PetscInt *newRows;
6042: if (!mat->rmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6043: ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6044: ISLocalToGlobalMappingApplyIS(mat->rmap->mapping,is,&newis);
6045: ISGetIndices(newis,&newRows);
6046: (*mat->ops->zerorows)(mat,numRows,newRows,diag,x,b);
6047: ISRestoreIndices(newis,&newRows);
6048: ISDestroy(&newis);
6049: ISDestroy(&is);
6050: }
6051: PetscObjectStateIncrease((PetscObject)mat);
6052: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
6053: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
6054: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
6055: }
6056: #endif
6057: return(0);
6058: }
6060: /*@C
6061: MatZeroRowsLocalIS - Zeros all entries (except possibly the main diagonal)
6062: of a set of rows of a matrix; using local numbering of rows.
6064: Collective on Mat
6066: Input Parameters:
6067: + mat - the matrix
6068: . is - index set of rows to remove
6069: . diag - value put in all diagonals of eliminated rows
6070: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6071: - b - optional vector of right hand side, that will be adjusted by provided solution
6073: Notes:
6074: Before calling MatZeroRowsLocalIS(), the user must first set the
6075: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6077: For the AIJ matrix formats this removes the old nonzero structure,
6078: but does not release memory. For the dense and block diagonal
6079: formats this does not alter the nonzero structure.
6081: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
6082: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
6083: merely zeroed.
6085: The user can set a value in the diagonal entry (or for the AIJ and
6086: row formats can optionally remove the main diagonal entry from the
6087: nonzero structure as well, by passing 0.0 as the final argument).
6089: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6090: owns that are to be zeroed. This saves a global synchronization in the implementation.
6092: Level: intermediate
6094: Concepts: matrices^zeroing
6096: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRows(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6097: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6098: @*/
6099: PetscErrorCode MatZeroRowsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6100: {
6102: PetscInt numRows;
6103: const PetscInt *rows;
6109: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6110: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6111: MatCheckPreallocated(mat,1);
6113: ISGetLocalSize(is,&numRows);
6114: ISGetIndices(is,&rows);
6115: MatZeroRowsLocal(mat,numRows,rows,diag,x,b);
6116: ISRestoreIndices(is,&rows);
6117: return(0);
6118: }
6120: /*@C
6121: MatZeroRowsColumnsLocal - Zeros all entries (except possibly the main diagonal)
6122: of a set of rows and columns of a matrix; using local numbering of rows.
6124: Collective on Mat
6126: Input Parameters:
6127: + mat - the matrix
6128: . numRows - the number of rows to remove
6129: . rows - the global row indices
6130: . diag - value put in all diagonals of eliminated rows
6131: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6132: - b - optional vector of right hand side, that will be adjusted by provided solution
6134: Notes:
6135: Before calling MatZeroRowsColumnsLocal(), the user must first set the
6136: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6138: The user can set a value in the diagonal entry (or for the AIJ and
6139: row formats can optionally remove the main diagonal entry from the
6140: nonzero structure as well, by passing 0.0 as the final argument).
6142: Level: intermediate
6144: Concepts: matrices^zeroing
6146: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6147: MatZeroRows(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6148: @*/
6149: PetscErrorCode MatZeroRowsColumnsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6150: {
6152: IS is, newis;
6153: const PetscInt *newRows;
6159: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6160: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6161: MatCheckPreallocated(mat,1);
6163: if (!mat->cmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6164: ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6165: ISLocalToGlobalMappingApplyIS(mat->cmap->mapping,is,&newis);
6166: ISGetIndices(newis,&newRows);
6167: (*mat->ops->zerorowscolumns)(mat,numRows,newRows,diag,x,b);
6168: ISRestoreIndices(newis,&newRows);
6169: ISDestroy(&newis);
6170: ISDestroy(&is);
6171: PetscObjectStateIncrease((PetscObject)mat);
6172: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
6173: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
6174: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
6175: }
6176: #endif
6177: return(0);
6178: }
6180: /*@C
6181: MatZeroRowsColumnsLocalIS - Zeros all entries (except possibly the main diagonal)
6182: of a set of rows and columns of a matrix; using local numbering of rows.
6184: Collective on Mat
6186: Input Parameters:
6187: + mat - the matrix
6188: . is - index set of rows to remove
6189: . diag - value put in all diagonals of eliminated rows
6190: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6191: - b - optional vector of right hand side, that will be adjusted by provided solution
6193: Notes:
6194: Before calling MatZeroRowsColumnsLocalIS(), the user must first set the
6195: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6197: The user can set a value in the diagonal entry (or for the AIJ and
6198: row formats can optionally remove the main diagonal entry from the
6199: nonzero structure as well, by passing 0.0 as the final argument).
6201: Level: intermediate
6203: Concepts: matrices^zeroing
6205: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6206: MatZeroRowsColumnsLocal(), MatZeroRows(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6207: @*/
6208: PetscErrorCode MatZeroRowsColumnsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6209: {
6211: PetscInt numRows;
6212: const PetscInt *rows;
6218: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6219: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6220: MatCheckPreallocated(mat,1);
6222: ISGetLocalSize(is,&numRows);
6223: ISGetIndices(is,&rows);
6224: MatZeroRowsColumnsLocal(mat,numRows,rows,diag,x,b);
6225: ISRestoreIndices(is,&rows);
6226: return(0);
6227: }
6229: /*@C
6230: MatGetSize - Returns the numbers of rows and columns in a matrix.
6232: Not Collective
6234: Input Parameter:
6235: . mat - the matrix
6237: Output Parameters:
6238: + m - the number of global rows
6239: - n - the number of global columns
6241: Note: both output parameters can be NULL on input.
6243: Level: beginner
6245: Concepts: matrices^size
6247: .seealso: MatGetLocalSize()
6248: @*/
6249: PetscErrorCode MatGetSize(Mat mat,PetscInt *m,PetscInt *n)
6250: {
6253: if (m) *m = mat->rmap->N;
6254: if (n) *n = mat->cmap->N;
6255: return(0);
6256: }
6258: /*@C
6259: MatGetLocalSize - Returns the number of rows and columns in a matrix
6260: stored locally. This information may be implementation dependent, so
6261: use with care.
6263: Not Collective
6265: Input Parameters:
6266: . mat - the matrix
6268: Output Parameters:
6269: + m - the number of local rows
6270: - n - the number of local columns
6272: Note: both output parameters can be NULL on input.
6274: Level: beginner
6276: Concepts: matrices^local size
6278: .seealso: MatGetSize()
6279: @*/
6280: PetscErrorCode MatGetLocalSize(Mat mat,PetscInt *m,PetscInt *n)
6281: {
6286: if (m) *m = mat->rmap->n;
6287: if (n) *n = mat->cmap->n;
6288: return(0);
6289: }
6291: /*@C
6292: MatGetOwnershipRangeColumn - Returns the range of matrix columns that correspond to the locally owned entries of a vector
6293: the matrix is multiplied against on this processor (the columns of the "diagonal block")
6295: Not Collective, unless matrix has not been allocated, then collective on Mat
6297: Input Parameters:
6298: . mat - the matrix
6300: Output Parameters:
6301: + m - the global index of the first local column
6302: - n - one more than the global index of the last local column
6304: Notes: both output parameters can be NULL on input.
6306: Level: developer
6308: Concepts: matrices^column ownership
6310: .seealso: MatGetOwnershipRange(), MatGetOwnershipRanges(), MatGetOwnershipRangesColumn()
6312: @*/
6313: PetscErrorCode MatGetOwnershipRangeColumn(Mat mat,PetscInt *m,PetscInt *n)
6314: {
6320: MatCheckPreallocated(mat,1);
6321: if (m) *m = mat->cmap->rstart;
6322: if (n) *n = mat->cmap->rend;
6323: return(0);
6324: }
6326: /*@C
6327: MatGetOwnershipRange - Returns the range of matrix rows owned by
6328: this processor, assuming that the matrix is laid out with the first
6329: n1 rows on the first processor, the next n2 rows on the second, etc.
6330: For certain parallel layouts this range may not be well defined.
6332: Not Collective
6334: Input Parameters:
6335: . mat - the matrix
6337: Output Parameters:
6338: + m - the global index of the first local row
6339: - n - one more than the global index of the last local row
6341: Note: Both output parameters can be NULL on input.
6342: $ This function requires that the matrix be preallocated. If you have not preallocated, consider using
6343: $ PetscSplitOwnership(MPI_Comm comm, PetscInt *n, PetscInt *N)
6344: $ and then MPI_Scan() to calculate prefix sums of the local sizes.
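   Example of Usage (a minimal sketch; the loop touches only locally owned rows):
.vb
     PetscInt i,rstart,rend;
     MatGetOwnershipRange(mat,&rstart,&rend);
     for (i=rstart; i<rend; i++) {
       /* set or read values only in rows rstart..rend-1 */
     }
.ve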
6346: Level: beginner
6348: Concepts: matrices^row ownership
6350: .seealso: MatGetOwnershipRanges(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn(), PetscSplitOwnership(), PetscSplitOwnershipBlock()
6352: @*/
6353: PetscErrorCode MatGetOwnershipRange(Mat mat,PetscInt *m,PetscInt *n)
6354: {
6360: MatCheckPreallocated(mat,1);
6361: if (m) *m = mat->rmap->rstart;
6362: if (n) *n = mat->rmap->rend;
6363: return(0);
6364: }
6366: /*@C
6367: MatGetOwnershipRanges - Returns the range of matrix rows owned by
6368: each process
6370: Not Collective, unless matrix has not been allocated, then collective on Mat
6372: Input Parameters:
6373: . mat - the matrix
6375: Output Parameters:
6376: . ranges - start of each processor's portion plus one more than the total length at the end
6378: Level: beginner
6380: Concepts: matrices^row ownership
6382: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn()
6384: @*/
6385: PetscErrorCode MatGetOwnershipRanges(Mat mat,const PetscInt **ranges)
6386: {
6392: MatCheckPreallocated(mat,1);
6393: PetscLayoutGetRanges(mat->rmap,ranges);
6394: return(0);
6395: }
6397: /*@C
6398: MatGetOwnershipRangesColumn - Returns the ranges of matrix columns that correspond to the locally owned entries of a vector
6399: the matrix is multiplied against, for each process (the columns of the "diagonal blocks")
6401: Not Collective, unless matrix has not been allocated, then collective on Mat
6403: Input Parameters:
6404: . mat - the matrix
6406: Output Parameters:
6407: . ranges - start of each processor's portion plus one more than the total length at the end
6409: Level: beginner
6411: Concepts: matrices^column ownership
6413: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRanges()
6415: @*/
6416: PetscErrorCode MatGetOwnershipRangesColumn(Mat mat,const PetscInt **ranges)
6417: {
6423: MatCheckPreallocated(mat,1);
6424: PetscLayoutGetRanges(mat->cmap,ranges);
6425: return(0);
6426: }
6428: /*@C
6429: MatGetOwnershipIS - Get row and column ownership as index sets
6431: Not Collective
6433: Input Arguments:
6434: . A - matrix of type Elemental
6436: Output Arguments:
6437: + rows - rows in which this process owns elements
6438: - cols - columns in which this process owns elements
6440: Level: intermediate
6442: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatSetValues(), MATELEMENTAL
6443: @*/
6444: PetscErrorCode MatGetOwnershipIS(Mat A,IS *rows,IS *cols)
6445: {
6446: PetscErrorCode ierr,(*f)(Mat,IS*,IS*);
6449: MatCheckPreallocated(A,1);
6450: PetscObjectQueryFunction((PetscObject)A,"MatGetOwnershipIS_C",&f);
6451: if (f) {
6452: (*f)(A,rows,cols);
6453: } else { /* Create a standard row-based partition, each process is responsible for ALL columns in their row block */
6454: if (rows) {ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,rows);}
6455: if (cols) {ISCreateStride(PETSC_COMM_SELF,A->cmap->N,0,1,cols);}
6456: }
6457: return(0);
6458: }
6460: /*@C
6461: MatILUFactorSymbolic - Performs symbolic ILU factorization of a matrix.
6462: Uses levels of fill only, not drop tolerance. Use MatLUFactorNumeric()
6463: to complete the factorization.
6465: Collective on Mat
6467: Input Parameters:
6468: + mat - the matrix
6469: . row - row permutation
6470: . column - column permutation
6471: - info - structure containing
6472: $ levels - number of levels of fill.
6473: $ expected fill - as ratio of original fill.
6474: $ 1 or 0 - indicating force fill on diagonal (improves robustness for matrices
6475: missing diagonal entries)
6477: Output Parameters:
6478: . fact - new matrix that has been symbolically factored
6480: Notes: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.
6482: Most users should employ the simplified KSP interface for linear solvers
6483: instead of working directly with matrix algebra routines such as this.
6484: See, e.g., KSPCreate().
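   Example of Usage (a minimal sketch of the low-level factorization sequence; most users should go through KSP/PC instead):
.vb
     Mat           fact;
     IS            row,col;
     MatFactorInfo info;
     MatGetFactor(mat,MATSOLVERPETSC,MAT_FACTOR_ILU,&fact);
     MatGetOrdering(mat,MATORDERINGNATURAL,&row,&col);
     MatFactorInfoInitialize(&info);
     info.levels = 1;
     info.fill   = 1.0;
     MatILUFactorSymbolic(fact,mat,row,col,&info);
     MatLUFactorNumeric(fact,mat,&info);
.ve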
6486: Level: developer
6488: Concepts: matrices^symbolic LU factorization
6489: Concepts: matrices^factorization
6490: Concepts: LU^symbolic factorization
6492: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
6493: MatGetOrdering(), MatFactorInfo
6495: Developer Note: fortran interface is not autogenerated as the f90
6496: interface definition cannot be generated correctly [due to MatFactorInfo]
6498: @*/
6499: PetscErrorCode MatILUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
6500: {
6510: if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels of fill negative %D",(PetscInt)info->levels);
6511: if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6512: if (!(fact)->ops->ilufactorsymbolic) {
6513: MatSolverType spackage;
6514: MatFactorGetSolverType(fact,&spackage);
6515: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ILU using solver package %s",((PetscObject)mat)->type_name,spackage);
6516: }
6517: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6518: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6519: MatCheckPreallocated(mat,2);
6521: PetscLogEventBegin(MAT_ILUFactorSymbolic,mat,row,col,0);
6522: (fact->ops->ilufactorsymbolic)(fact,mat,row,col,info);
6523: PetscLogEventEnd(MAT_ILUFactorSymbolic,mat,row,col,0);
6524: return(0);
6525: }
6527: /*@C
6528: MatICCFactorSymbolic - Performs symbolic incomplete
6529: Cholesky factorization for a symmetric matrix. Use
6530: MatCholeskyFactorNumeric() to complete the factorization.
6532: Collective on Mat
6534: Input Parameters:
6535: + mat - the matrix
6536: . perm - row and column permutation
6537: - info - structure containing
6538: $ levels - number of levels of fill.
6539: $ expected fill - as ratio of original fill.
6541: Output Parameter:
6542: . fact - the factored matrix
6544: Notes:
6545: Most users should employ the KSP interface for linear solvers
6546: instead of working directly with matrix algebra routines such as this.
6547: See, e.g., KSPCreate().
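 Example of Usage (an illustrative sketch of the ICC(0) sequence, analogous to the ILU example above; the
 natural ordering and PETSc's own solver package are assumptions made only for this example):
.vb
      Mat           fact;
      IS            rowperm,colperm;
      MatFactorInfo info;

      MatFactorInfoInitialize(&info);
      info.levels = 0;                                   /* ICC(0) */
      info.fill   = 1.0;
      MatGetOrdering(mat,MATORDERINGNATURAL,&rowperm,&colperm);
      MatGetFactor(mat,MATSOLVERPETSC,MAT_FACTOR_ICC,&fact);
      MatICCFactorSymbolic(fact,mat,rowperm,&info);
      MatCholeskyFactorNumeric(fact,mat,&info);
      ISDestroy(&rowperm);
      ISDestroy(&colperm);
.ve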
6549: Level: developer
6551: Concepts: matrices^symbolic incomplete Cholesky factorization
6552: Concepts: matrices^factorization
6553: Concepts: Cholesky^symbolic factorization
6555: .seealso: MatCholeskyFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
6557: Developer Note: fortran interface is not autogenerated as the f90
6558: interface definition cannot be generated correctly [due to MatFactorInfo]
6560: @*/
6561: PetscErrorCode MatICCFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
6562: {
6571: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6572: if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels negative %D",(PetscInt) info->levels);
6573: if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6574: if (!(fact)->ops->iccfactorsymbolic) {
6575: MatSolverType spackage;
6576: MatFactorGetSolverType(fact,&spackage);
6577: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ICC using solver package %s",((PetscObject)mat)->type_name,spackage);
6578: }
6579: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6580: MatCheckPreallocated(mat,2);
6582: PetscLogEventBegin(MAT_ICCFactorSymbolic,mat,perm,0,0);
6583: (fact->ops->iccfactorsymbolic)(fact,mat,perm,info);
6584: PetscLogEventEnd(MAT_ICCFactorSymbolic,mat,perm,0,0);
6585: return(0);
6586: }
6588: /*@C
6589: MatCreateSubMatrices - Extracts several submatrices from a matrix. If submat
6590: points to an array of valid matrices, they may be reused to store the new
6591: submatrices.
6593: Collective on Mat
6595: Input Parameters:
6596: + mat - the matrix
6597: . n - the number of submatrices to be extracted (on this processor, may be zero)
6598: . irow, icol - index sets of rows and columns to extract
6599: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
6601: Output Parameter:
6602: . submat - the array of submatrices
6604: Notes:
6605: MatCreateSubMatrices() can extract ONLY sequential submatrices
6606: (from both sequential and parallel matrices). Use MatCreateSubMatrix()
6607: to extract a parallel submatrix.
6609: Some matrix types place restrictions on the row and column
6610: indices, such as that they be sorted or that they be equal to each other.
6612: The index sets may not have duplicate entries.
6614: When extracting submatrices from a parallel matrix, each processor can
6615: form a different submatrix by setting the rows and columns of its
6616: individual index sets according to the local submatrix desired.
6618: When finished using the submatrices, the user should destroy
6619: them with MatDestroySubMatrices().
6621: MAT_REUSE_MATRIX can only be used when the nonzero structure of the
6622: original matrix has not changed from that last call to MatCreateSubMatrices().
6624: This routine creates the matrices in submat; you should NOT create them before
6625: calling it. It also allocates the array of matrix pointers submat.
6627: For BAIJ matrices the index sets must respect the block structure, that is if they
6628: request one row/column in a block, they must request all rows/columns that are in
6629: that block. For example, if the block size is 2 you cannot request just row 0 and
6630: column 0.
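 Example of Usage (an illustrative sketch in which each process extracts its own diagonal block of an
 assembled parallel matrix as a sequential matrix; it assumes the row and column ownerships match):
.vb
      IS       rowis,colis;
      Mat      *submats;
      PetscInt rstart,rend;

      MatGetOwnershipRange(mat,&rstart,&rend);
      ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,&rowis);
      ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,&colis);
      MatCreateSubMatrices(mat,1,&rowis,&colis,MAT_INITIAL_MATRIX,&submats);
      /* ... work with submats[0], a sequential matrix ... */
      MatDestroySubMatrices(1,&submats);
      ISDestroy(&rowis);
      ISDestroy(&colis);
.ve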
6632: Fortran Note:
6633: The Fortran interface is slightly different from that given below; it
6634: requires one to pass in as submat a Mat (integer) array of size at least n+1.
6636: Level: advanced
6638: Concepts: matrices^accessing submatrices
6639: Concepts: submatrices
6641: .seealso: MatDestroySubMatrices(), MatCreateSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6642: @*/
6643: PetscErrorCode MatCreateSubMatrices(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6644: {
6646: PetscInt i;
6647: PetscBool eq;
6652: if (n) {
6657: }
6659: if (n && scall == MAT_REUSE_MATRIX) {
6662: }
6663: if (!mat->ops->createsubmatrices) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6664: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6665: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6666: MatCheckPreallocated(mat,1);
6668: PetscLogEventBegin(MAT_CreateSubMats,mat,0,0,0);
6669: (*mat->ops->createsubmatrices)(mat,n,irow,icol,scall,submat);
6670: PetscLogEventEnd(MAT_CreateSubMats,mat,0,0,0);
6671: for (i=0; i<n; i++) {
6672: (*submat)[i]->factortype = MAT_FACTOR_NONE; /* in case in place factorization was previously done on submatrix */
6673: if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6674: ISEqual(irow[i],icol[i],&eq);
6675: if (eq) {
6676: if (mat->symmetric) {
6677: MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6678: } else if (mat->hermitian) {
6679: MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6680: } else if (mat->structurally_symmetric) {
6681: MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6682: }
6683: }
6684: }
6685: }
6686: return(0);
6687: }
6689: /*@C
6690: MatCreateSubMatricesMPI - Extracts MPI submatrices across a sub communicator of mat (by pairs of IS that may live on subcomms).
6692: Collective on Mat
6694: Input Parameters:
6695: + mat - the matrix
6696: . n - the number of submatrices to be extracted
6697: . irow, icol - index sets of rows and columns to extract
6698: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
6700: Output Parameter:
6701: . submat - the array of submatrices
6703: Level: advanced
6705: Concepts: matrices^accessing submatrices
6706: Concepts: submatrices
6708: .seealso: MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6709: @*/
6710: PetscErrorCode MatCreateSubMatricesMPI(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6711: {
6713: PetscInt i;
6714: PetscBool eq;
6719: if (n) {
6724: }
6726: if (n && scall == MAT_REUSE_MATRIX) {
6729: }
6730: if (!mat->ops->createsubmatricesmpi) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6731: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6732: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6733: MatCheckPreallocated(mat,1);
6735: PetscLogEventBegin(MAT_CreateSubMats,mat,0,0,0);
6736: (*mat->ops->createsubmatricesmpi)(mat,n,irow,icol,scall,submat);
6737: PetscLogEventEnd(MAT_CreateSubMats,mat,0,0,0);
6738: for (i=0; i<n; i++) {
6739: if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6740: ISEqual(irow[i],icol[i],&eq);
6741: if (eq) {
6742: if (mat->symmetric) {
6743: MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6744: } else if (mat->hermitian) {
6745: MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6746: } else if (mat->structurally_symmetric) {
6747: MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6748: }
6749: }
6750: }
6751: }
6752: return(0);
6753: }
6755: /*@C
6756: MatDestroyMatrices - Destroys an array of matrices.
6758: Collective on Mat
6760: Input Parameters:
6761: + n - the number of local matrices
6762: - mat - the matrices (note that this is a pointer to the array of matrices)
6764: Level: advanced
6766: Notes: Frees not only the matrices, but also the array that contains the matrices.
6767: In Fortran this routine will not free the array.
6769: .seealso: MatCreateSubMatrices() MatDestroySubMatrices()
6770: @*/
6771: PetscErrorCode MatDestroyMatrices(PetscInt n,Mat *mat[])
6772: {
6774: PetscInt i;
6777: if (!*mat) return(0);
6778: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);
6781: for (i=0; i<n; i++) {
6782: MatDestroy(&(*mat)[i]);
6783: }
6785: /* memory is allocated even if n = 0 */
6786: PetscFree(*mat);
6787: return(0);
6788: }
6790: /*@C
6791: MatDestroySubMatrices - Destroys a set of matrices obtained with MatCreateSubMatrices().
6793: Collective on Mat
6795: Input Parameters:
6796: + n - the number of local matrices
6797: - mat - the matrices (note that this is a pointer to the array of matrices, just to match the calling
6798: sequence of MatCreateSubMatrices())
6800: Level: advanced
6802: Notes: Frees not only the matrices, but also the array that contains the matrices.
6803: In Fortran this routine will not free the array.
6805: .seealso: MatCreateSubMatrices()
6806: @*/
6807: PetscErrorCode MatDestroySubMatrices(PetscInt n,Mat *mat[])
6808: {
6810: Mat mat0;
6813: if (!*mat) return(0);
6814: /* mat[] is an array of length n+1, see MatCreateSubMatrices_xxx() */
6815: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);
6818: mat0 = (*mat)[0];
6819: if (mat0 && mat0->ops->destroysubmatrices) {
6820: (mat0->ops->destroysubmatrices)(n,mat);
6821: } else {
6822: MatDestroyMatrices(n,mat);
6823: }
6824: return(0);
6825: }
6827: /*@C
6828: MatGetSeqNonzeroStructure - Extracts the sequential nonzero structure from a matrix.
6830: Collective on Mat
6832: Input Parameters:
6833: . mat - the matrix
6835: Output Parameter:
6836: . matstruct - the sequential matrix with the nonzero structure of mat
6838: Level: intermediate
6840: .seealso: MatDestroySeqNonzeroStructure(), MatCreateSubMatrices(), MatDestroyMatrices()
6841: @*/
6842: PetscErrorCode MatGetSeqNonzeroStructure(Mat mat,Mat *matstruct)
6843: {
6851: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6852: MatCheckPreallocated(mat,1);
6854: if (!mat->ops->getseqnonzerostructure) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not for matrix type %s\n",((PetscObject)mat)->type_name);
6855: PetscLogEventBegin(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6856: (*mat->ops->getseqnonzerostructure)(mat,matstruct);
6857: PetscLogEventEnd(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6858: return(0);
6859: }
6861: /*@C
6862: MatDestroySeqNonzeroStructure - Destroys matrix obtained with MatGetSeqNonzeroStructure().
6864: Collective on Mat
6866: Input Parameters:
6867: . mat - the matrix (note that this is a pointer to the matrix, just to match the calling
6868: sequence of MatGetSeqNonzeroStructure())
6870: Level: advanced
6872: Notes: Frees the matrix created by MatGetSeqNonzeroStructure()
6874: .seealso: MatGetSeqNonzeroStructure()
6875: @*/
6876: PetscErrorCode MatDestroySeqNonzeroStructure(Mat *mat)
6877: {
6882: MatDestroy(mat);
6883: return(0);
6884: }
6886: /*@
6887: MatIncreaseOverlap - Given a set of submatrices indicated by index sets,
6888: replaces the index sets by larger ones that represent submatrices with
6889: additional overlap.
6891: Collective on Mat
6893: Input Parameters:
6894: + mat - the matrix
6895: . n - the number of index sets
6896: . is - the array of index sets (these index sets will be changed during the call)
6897: - ov - the additional overlap requested
6899: Options Database:
6900: . -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)
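 Example of Usage (a sketch in the spirit of overlapping additive Schwarz; starting from this process's
 locally owned rows is an assumption made only for this example):
.vb
      IS       is;
      PetscInt rstart,rend;

      MatGetOwnershipRange(mat,&rstart,&rend);
      ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,&is);
      MatIncreaseOverlap(mat,1,&is,2);   /* grow the subdomain by two levels of overlap */
      /* is now indexes the rows of the overlapping subdomain */
      ISDestroy(&is);
.ve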
6902: Level: developer
6904: Concepts: overlap
6905: Concepts: ASM^computing overlap
6907: .seealso: MatCreateSubMatrices()
6908: @*/
6909: PetscErrorCode MatIncreaseOverlap(Mat mat,PetscInt n,IS is[],PetscInt ov)
6910: {
6916: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
6917: if (n) {
6920: }
6921: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6922: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6923: MatCheckPreallocated(mat,1);
6925: if (!ov) return(0);
6926: if (!mat->ops->increaseoverlap) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6927: PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
6928: (*mat->ops->increaseoverlap)(mat,n,is,ov);
6929: PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
6930: return(0);
6931: }
6934: PetscErrorCode MatIncreaseOverlapSplit_Single(Mat,IS*,PetscInt);
6936: /*@
6937: MatIncreaseOverlapSplit - Given a set of submatrices indicated by index sets across
6938: a sub communicator, replaces the index sets by larger ones that represent submatrices with
6939: additional overlap.
6941: Collective on Mat
6943: Input Parameters:
6944: + mat - the matrix
6945: . n - the number of index sets
6946: . is - the array of index sets (these index sets will be changed during the call)
6947: - ov - the additional overlap requested
6949: Options Database:
6950: . -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)
6952: Level: developer
6954: Concepts: overlap
6955: Concepts: ASM^computing overlap
6957: .seealso: MatCreateSubMatrices()
6958: @*/
6959: PetscErrorCode MatIncreaseOverlapSplit(Mat mat,PetscInt n,IS is[],PetscInt ov)
6960: {
6961: PetscInt i;
6967: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
6968: if (n) {
6971: }
6972: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6973: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6974: MatCheckPreallocated(mat,1);
6975: if (!ov) return(0);
6976: PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
6977: for (i=0; i<n; i++) {
6978: MatIncreaseOverlapSplit_Single(mat,&is[i],ov);
6979: }
6980: PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
6981: return(0);
6982: }
6987: /*@
6988: MatGetBlockSize - Returns the matrix block size.
6990: Not Collective
6992: Input Parameter:
6993: . mat - the matrix
6995: Output Parameter:
6996: . bs - block size
6998: Notes:
6999: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7001: If the block size has not been set yet this routine returns 1.
7003: Level: intermediate
7005: Concepts: matrices^block size
7007: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSizes()
7008: @*/
7009: PetscErrorCode MatGetBlockSize(Mat mat,PetscInt *bs)
7010: {
7014: *bs = PetscAbs(mat->rmap->bs);
7015: return(0);
7016: }
7018: /*@
7019: MatGetBlockSizes - Returns the matrix block row and column sizes.
7021: Not Collective
7023: Input Parameter:
7024: . mat - the matrix
7026: Output Parameters:
7027: + rbs - row block size
7028: - cbs - column block size
7030: Notes:
7031: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7032: If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
7034: If a block size has not been set yet this routine returns 1.
7036: Level: intermediate
7038: Concepts: matrices^block size
7040: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatSetBlockSizes()
7041: @*/
7042: PetscErrorCode MatGetBlockSizes(Mat mat,PetscInt *rbs, PetscInt *cbs)
7043: {
7048: if (rbs) *rbs = PetscAbs(mat->rmap->bs);
7049: if (cbs) *cbs = PetscAbs(mat->cmap->bs);
7050: return(0);
7051: }
7053: /*@
7054: MatSetBlockSize - Sets the matrix block size.
7056: Logically Collective on Mat
7058: Input Parameters:
7059: + mat - the matrix
7060: - bs - block size
7062: Notes:
7063: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7064: This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later.
7066: For MATMPIAIJ and MATSEQAIJ matrix formats, this function can be called at a later stage, provided that the specified block size
7067: is compatible with the matrix local sizes.
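 Example of Usage (an illustrative sketch in which the block size is set before preallocation, as required
 for BAIJ; here n is a placeholder for the global size and is assumed to be a multiple of the block size):
.vb
      Mat mat;

      MatCreate(PETSC_COMM_WORLD,&mat);
      MatSetSizes(mat,PETSC_DECIDE,PETSC_DECIDE,n,n);
      MatSetType(mat,MATBAIJ);
      MatSetBlockSize(mat,3);
      MatSetUp(mat);
.ve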
7069: Level: intermediate
7071: Concepts: matrices^block size
7073: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes()
7074: @*/
7075: PetscErrorCode MatSetBlockSize(Mat mat,PetscInt bs)
7076: {
7082: MatSetBlockSizes(mat,bs,bs);
7083: return(0);
7084: }
7086: /*@
7087: MatSetBlockSizes - Sets the matrix block row and column sizes.
7089: Logically Collective on Mat
7091: Input Parameters:
7092: + mat - the matrix
7093: . rbs - row block size
7094: - cbs - column block size
7096: Notes:
7097: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7098: If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
7099: This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later
7101: For MATMPIAIJ and MATSEQAIJ matrix formats, this function can be called at a later stage, provided that the specified block sizes
7102: are compatible with the matrix local sizes.
7104: The row and column block size determine the blocksize of the "row" and "column" vectors returned by MatCreateVecs().
7106: Level: intermediate
7108: Concepts: matrices^block size
7110: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatGetBlockSizes()
7111: @*/
7112: PetscErrorCode MatSetBlockSizes(Mat mat,PetscInt rbs,PetscInt cbs)
7113: {
7120: if (mat->ops->setblocksizes) {
7121: (*mat->ops->setblocksizes)(mat,rbs,cbs);
7122: }
7123: if (mat->rmap->refcnt) {
7124: ISLocalToGlobalMapping l2g = NULL;
7125: PetscLayout nmap = NULL;
7127: PetscLayoutDuplicate(mat->rmap,&nmap);
7128: if (mat->rmap->mapping) {
7129: ISLocalToGlobalMappingDuplicate(mat->rmap->mapping,&l2g);
7130: }
7131: PetscLayoutDestroy(&mat->rmap);
7132: mat->rmap = nmap;
7133: mat->rmap->mapping = l2g;
7134: }
7135: if (mat->cmap->refcnt) {
7136: ISLocalToGlobalMapping l2g = NULL;
7137: PetscLayout nmap = NULL;
7139: PetscLayoutDuplicate(mat->cmap,&nmap);
7140: if (mat->cmap->mapping) {
7141: ISLocalToGlobalMappingDuplicate(mat->cmap->mapping,&l2g);
7142: }
7143: PetscLayoutDestroy(&mat->cmap);
7144: mat->cmap = nmap;
7145: mat->cmap->mapping = l2g;
7146: }
7147: PetscLayoutSetBlockSize(mat->rmap,rbs);
7148: PetscLayoutSetBlockSize(mat->cmap,cbs);
7149: return(0);
7150: }
7152: /*@
7153: MatSetBlockSizesFromMats - Sets the matrix block row and column sizes to match a pair of matrices
7155: Logically Collective on Mat
7157: Input Parameters:
7158: + mat - the matrix
7159: . fromRow - matrix from which to copy row block size
7160: - fromCol - matrix from which to copy column block size (can be same as fromRow)
7162: Level: developer
7164: Concepts: matrices^block size
7166: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes()
7167: @*/
7168: PetscErrorCode MatSetBlockSizesFromMats(Mat mat,Mat fromRow,Mat fromCol)
7169: {
7176: if (fromRow->rmap->bs > 0) {PetscLayoutSetBlockSize(mat->rmap,fromRow->rmap->bs);}
7177: if (fromCol->cmap->bs > 0) {PetscLayoutSetBlockSize(mat->cmap,fromCol->cmap->bs);}
7178: return(0);
7179: }
7181: /*@
7182: MatResidual - Default routine to calculate the residual.
7184: Collective on Mat and Vec
7186: Input Parameters:
7187: + mat - the matrix
7188: . b - the right-hand-side
7189: - x - the approximate solution
7191: Output Parameter:
7192: . r - location to store the residual
7194: Level: developer
7196: .keywords: MG, default, multigrid, residual
7198: .seealso: PCMGSetResidual()
7199: @*/
7200: PetscErrorCode MatResidual(Mat mat,Vec b,Vec x,Vec r)
7201: {
7210: MatCheckPreallocated(mat,1);
7211: PetscLogEventBegin(MAT_Residual,mat,0,0,0);
7212: if (!mat->ops->residual) {
7213: MatMult(mat,x,r);
7214: VecAYPX(r,-1.0,b);
7215: } else {
7216: (*mat->ops->residual)(mat,b,x,r);
7217: }
7218: PetscLogEventEnd(MAT_Residual,mat,0,0,0);
7219: return(0);
7220: }
7222: /*@C
7223: MatGetRowIJ - Returns the compressed row storage i and j indices for sequential matrices.
7225: Collective on Mat
7227: Input Parameters:
7228: + mat - the matrix
7229: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7230: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be symmetrized
7231: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7232: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7233: always used.
7235: Output Parameters:
7236: + n - number of rows in the (possibly compressed) matrix
7237: . ia - the row pointers [of length n+1]
7238: . ja - the column indices
7239: - done - indicates if the routine actually worked and returned appropriate ia[] and ja[] arrays; callers
7240: are responsible for handling the case when done == PETSC_FALSE and ia and ja are not set
7242: Level: developer
7244: Notes:
7245: You CANNOT change any of the ia[] or ja[] values.
7247: Use MatRestoreRowIJ() when you are finished accessing the ia[] and ja[] values.
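 A typical C call sequence might look as follows (an illustrative sketch; it only does something when the
 matrix type supports the operation, as reported through done):
.vb
      PetscInt       n;
      const PetscInt *ia,*ja;
      PetscBool      done;

      MatGetRowIJ(mat,0,PETSC_FALSE,PETSC_FALSE,&n,&ia,&ja,&done);
      if (done) {
        /* the column indices of row i are ja[ia[i]] through ja[ia[i+1]-1] */
      }
      MatRestoreRowIJ(mat,0,PETSC_FALSE,PETSC_FALSE,&n,&ia,&ja,&done);
.ve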
7249: Fortran Notes:
7250: In Fortran use
7251: $
7252: $ PetscInt ia(1), ja(1)
7253: $ PetscOffset iia, jja
7254: $ call MatGetRowIJ(mat,shift,symmetric,inodecompressed,n,ia,iia,ja,jja,done,ierr)
7255: $ ! Access the ith and jth entries via ia(iia + i) and ja(jja + j)
7257: or
7258: $
7259: $ PetscInt, pointer :: ia(:),ja(:)
7260: $ call MatGetRowIJF90(mat,shift,symmetric,inodecompressed,n,ia,ja,done,ierr)
7261: $ ! Access the ith and jth entries via ia(i) and ja(j)
7263: .seealso: MatGetColumnIJ(), MatRestoreRowIJ(), MatSeqAIJGetArray()
7264: @*/
7265: PetscErrorCode MatGetRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7266: {
7276: MatCheckPreallocated(mat,1);
7277: if (!mat->ops->getrowij) *done = PETSC_FALSE;
7278: else {
7279: *done = PETSC_TRUE;
7280: PetscLogEventBegin(MAT_GetRowIJ,mat,0,0,0);
7281: (*mat->ops->getrowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7282: PetscLogEventEnd(MAT_GetRowIJ,mat,0,0,0);
7283: }
7284: return(0);
7285: }
7287: /*@C
7288: MatGetColumnIJ - Returns the compressed column storage i and j indices for sequential matrices.
7290: Collective on Mat
7292: Input Parameters:
7293: + mat - the matrix
7294: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7295: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7296: symmetrized
7297: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7298: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7299: always used.
7300: Output Parameters:
7301: + n - number of columns in the (possibly compressed) matrix
7302: . ia - the column pointers
7304: . ja - the row indices
7305: - done - PETSC_TRUE or PETSC_FALSE, indicating whether the values have been returned
7307: Note:
7308: MatRestoreColumnIJ() zeros out n, ia, and ja; this is to prevent accidental
7309: use of the arrays after they have been restored. If you pass NULL, it will
7310: not zero the pointers. Use of ia or ja after MatRestoreColumnIJ() is invalid.
7312: Level: developer
7314: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7315: @*/
7316: PetscErrorCode MatGetColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7317: {
7327: MatCheckPreallocated(mat,1);
7328: if (!mat->ops->getcolumnij) *done = PETSC_FALSE;
7329: else {
7330: *done = PETSC_TRUE;
7331: (*mat->ops->getcolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7332: }
7333: return(0);
7334: }
7336: /*@C
7337: MatRestoreRowIJ - Call after you are finished using the ia,ja indices obtained with
7338: MatGetRowIJ().
7340: Collective on Mat
7342: Input Parameters:
7343: + mat - the matrix
7344: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7345: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7346: symmetrized
7347: . inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7348: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7349: always used.
7350: . n - size of (possibly compressed) matrix
7351: . ia - the row pointers
7352: - ja - the column indices
7354: Output Parameters:
7355: . done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned
7357: Note:
7358: This routine zeros out n, ia, and ja. This is to prevent accidental
7359: use of the array after it has been restored. If you pass NULL, it will
7360: not zero the pointers. Use of ia or ja after MatRestoreRowIJ() is invalid.
7362: Level: developer
7364: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7365: @*/
7366: PetscErrorCode MatRestoreRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7367: {
7376: MatCheckPreallocated(mat,1);
7378: if (!mat->ops->restorerowij) *done = PETSC_FALSE;
7379: else {
7380: *done = PETSC_TRUE;
7381: (*mat->ops->restorerowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7382: if (n) *n = 0;
7383: if (ia) *ia = NULL;
7384: if (ja) *ja = NULL;
7385: }
7386: return(0);
7387: }
7389: /*@C
7390: MatRestoreColumnIJ - Call after you are finished using the ia,ja indices obtained with
7391: MatGetColumnIJ().
7393: Collective on Mat
7395: Input Parameters:
7396: + mat - the matrix
7397: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7398: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7399: symmetrized
7400: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7401: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7402: always used.
7404: Output Parameters:
7405: + n - size of (possibly compressed) matrix
7406: . ia - the column pointers
7407: . ja - the row indices
7408: - done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned
7410: Level: developer
7412: .seealso: MatGetColumnIJ(), MatRestoreRowIJ()
7413: @*/
7414: PetscErrorCode MatRestoreColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7415: {
7424: MatCheckPreallocated(mat,1);
7426: if (!mat->ops->restorecolumnij) *done = PETSC_FALSE;
7427: else {
7428: *done = PETSC_TRUE;
7429: (*mat->ops->restorecolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7430: if (n) *n = 0;
7431: if (ia) *ia = NULL;
7432: if (ja) *ja = NULL;
7433: }
7434: return(0);
7435: }
7437: /*@C
7438: MatColoringPatch - Used inside matrix coloring routines that
7439: use MatGetRowIJ() and/or MatGetColumnIJ().
7441: Collective on Mat
7443: Input Parameters:
7444: + mat - the matrix
7445: . ncolors - max color value
7446: . n - number of entries in colorarray
7447: - colorarray - array indicating color for each column
7449: Output Parameters:
7450: . iscoloring - coloring generated using colorarray information
7452: Level: developer
7454: .seealso: MatGetRowIJ(), MatGetColumnIJ()
7456: @*/
7457: PetscErrorCode MatColoringPatch(Mat mat,PetscInt ncolors,PetscInt n,ISColoringValue colorarray[],ISColoring *iscoloring)
7458: {
7466: MatCheckPreallocated(mat,1);
7468: if (!mat->ops->coloringpatch) {
7469: ISColoringCreate(PetscObjectComm((PetscObject)mat),ncolors,n,colorarray,PETSC_OWN_POINTER,iscoloring);
7470: } else {
7471: (*mat->ops->coloringpatch)(mat,ncolors,n,colorarray,iscoloring);
7472: }
7473: return(0);
7474: }
7477: /*@
7478: MatSetUnfactored - Resets a factored matrix to be treated as unfactored.
7480: Logically Collective on Mat
7482: Input Parameter:
7483: . mat - the factored matrix to be reset
7485: Notes:
7486: This routine should be used only with factored matrices formed by in-place
7487: factorization via ILU(0) (or by in-place LU factorization for the MATSEQDENSE
7488: format). This option can save memory, for example, when solving nonlinear
7489: systems with a matrix-free Newton-Krylov method and a matrix-based, in-place
7490: ILU(0) preconditioner.
7492: Note that one can specify in-place ILU(0) factorization by calling
7493: .vb
7494: PCSetType(pc,PCILU);
7495: PCFactorSetUseInPlace(pc,PETSC_TRUE);
7496: .ve
7497: or by using the options -pc_type ilu -pc_factor_in_place
7499: In-place factorization ILU(0) can also be used as a local
7500: solver for the blocks within the block Jacobi or additive Schwarz
7501: methods (runtime option: -sub_pc_factor_in_place). See Users-Manual: ch_pc
7502: for details on setting local solver options.
7504: Most users should employ the simplified KSP interface for linear solvers
7505: instead of working directly with matrix algebra routines such as this.
7506: See, e.g., KSPCreate().
7508: Level: developer
7510: .seealso: PCFactorSetUseInPlace(), PCFactorGetUseInPlace()
7512: Concepts: matrices^unfactored
7514: @*/
7515: PetscErrorCode MatSetUnfactored(Mat mat)
7516: {
7522: MatCheckPreallocated(mat,1);
7523: mat->factortype = MAT_FACTOR_NONE;
7524: if (!mat->ops->setunfactored) return(0);
7525: (*mat->ops->setunfactored)(mat);
7526: return(0);
7527: }
7529: /*MC
7530: MatDenseGetArrayF90 - Accesses a matrix array from Fortran90.
7532: Synopsis:
7533: MatDenseGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)
7535: Not collective
7537: Input Parameter:
7538: . x - matrix
7540: Output Parameters:
7541: + xx_v - the Fortran90 pointer to the array
7542: - ierr - error code
7544: Example of Usage:
7545: .vb
7546: PetscScalar, pointer :: xx_v(:,:)
7547: ....
7548: call MatDenseGetArrayF90(x,xx_v,ierr)
7549: a = xx_v(3,2)
7550: call MatDenseRestoreArrayF90(x,xx_v,ierr)
7551: .ve
7553: Level: advanced
7555: .seealso: MatDenseRestoreArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJGetArrayF90()
7557: Concepts: matrices^accessing array
7559: M*/
7561: /*MC
7562: MatDenseRestoreArrayF90 - Restores a matrix array that has been
7563: accessed with MatDenseGetArrayF90().
7565: Synopsis:
7566: MatDenseRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)
7568: Not collective
7570: Input Parameters:
7571: + x - matrix
7572: - xx_v - the Fortran90 pointer to the array
7574: Output Parameter:
7575: . ierr - error code
7577: Example of Usage:
7578: .vb
7579: PetscScalar, pointer :: xx_v(:,:)
7580: ....
7581: call MatDenseGetArrayF90(x,xx_v,ierr)
7582: a = xx_v(3,2)
7583: call MatDenseRestoreArrayF90(x,xx_v,ierr)
7584: .ve
7586: Level: advanced
7588: .seealso: MatDenseGetArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJRestoreArrayF90()
7590: M*/
7593: /*MC
7594: MatSeqAIJGetArrayF90 - Accesses a matrix array from Fortran90.
7596: Synopsis:
7597: MatSeqAIJGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)
7599: Not collective
7601: Input Parameter:
7602: . x - matrix
7604: Output Parameters:
7605: + xx_v - the Fortran90 pointer to the array
7606: - ierr - error code
7608: Example of Usage:
7609: .vb
7610: PetscScalar, pointer :: xx_v(:)
7611: ....
7612: call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7613: a = xx_v(3)
7614: call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7615: .ve
7617: Level: advanced
7619: .seealso: MatSeqAIJRestoreArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseGetArrayF90()
7621: Concepts: matrices^accessing array
7623: M*/
7625: /*MC
7626: MatSeqAIJRestoreArrayF90 - Restores a matrix array that has been
7627: accessed with MatSeqAIJGetArrayF90().
7629: Synopsis:
7630: MatSeqAIJRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)
7632: Not collective
7634: Input Parameters:
7635: + x - matrix
7636: - xx_v - the Fortran90 pointer to the array
7638: Output Parameter:
7639: . ierr - error code
7641: Example of Usage:
7642: .vb
7643: PetscScalar, pointer :: xx_v(:)
7644: ....
7645: call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7646: a = xx_v(3)
7647: call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7648: .ve
7650: Level: advanced
7652: .seealso: MatSeqAIJGetArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseRestoreArrayF90()
7654: M*/
7657: /*@
7658: MatCreateSubMatrix - Gets a single submatrix on the same number of processors
7659: as the original matrix.
7661: Collective on Mat
7663: Input Parameters:
7664: + mat - the original matrix
7665: . isrow - parallel IS containing the rows this processor should obtain
7666: . iscol - parallel IS containing all columns you wish to keep. Each process should list the columns that will be in its "diagonal part" in the new matrix.
7667: - cll - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
7669: Output Parameter:
7670: . newmat - the new submatrix, of the same type as the old
7672: Level: advanced
7674: Notes:
7675: The submatrix can be multiplied with vectors that use the same layout as iscol.
7677: Some matrix types place restrictions on the row and column indices, such
7678: as that they be sorted or that they be equal to each other.
7680: The index sets may not have duplicate entries.
7682: The first time this is called you should use a cll of MAT_INITIAL_MATRIX;
7683: the MatCreateSubMatrix() routine will create the newmat for you. Any additional calls
7684: to this routine with a mat of the same nonzero structure and with a cll of MAT_REUSE_MATRIX
7685: will reuse the matrix generated the first time. You should call MatDestroy() on newmat when
7686: you are finished using it.
7688: The communicator of the newly obtained matrix is ALWAYS the same as the communicator of
7689: the input matrix.
7691: If iscol is NULL then all columns are obtained (not supported in Fortran).
7693: Example usage:
7694: Consider the following 8x8 matrix with 34 non-zero values, that is
7695: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
7696: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
7697: as follows:
7699: .vb
7700: 1 2 0 | 0 3 0 | 0 4
7701: Proc0 0 5 6 | 7 0 0 | 8 0
7702: 9 0 10 | 11 0 0 | 12 0
7703: -------------------------------------
7704: 13 0 14 | 15 16 17 | 0 0
7705: Proc1 0 18 0 | 19 20 21 | 0 0
7706: 0 0 0 | 22 23 0 | 24 0
7707: -------------------------------------
7708: Proc2 25 26 27 | 0 0 28 | 29 0
7709: 30 0 0 | 31 32 33 | 0 34
7710: .ve
7712: Suppose isrow = [0 1 | 4 | 6 7] and iscol = [1 2 | 3 4 5 | 6]. The resulting submatrix is
7714: .vb
7715: 2 0 | 0 3 0 | 0
7716: Proc0 5 6 | 7 0 0 | 8
7717: -------------------------------
7718: Proc1 18 0 | 19 20 21 | 0
7719: -------------------------------
7720: Proc2 26 27 | 0 0 28 | 29
7721: 0 0 | 31 32 33 | 0
7722: .ve
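 A corresponding call sequence might look as follows (an illustrative sketch; nlocalrows, localrows,
 nlocalcols and localcols are placeholder names for each process's piece of the global index sets above):
.vb
      IS  isrow,iscol;
      Mat S;

      ISCreateGeneral(PETSC_COMM_WORLD,nlocalrows,localrows,PETSC_COPY_VALUES,&isrow);
      ISCreateGeneral(PETSC_COMM_WORLD,nlocalcols,localcols,PETSC_COPY_VALUES,&iscol);
      MatCreateSubMatrix(mat,isrow,iscol,MAT_INITIAL_MATRIX,&S);
      /* ... use S ... */
      MatDestroy(&S);
      ISDestroy(&isrow);
      ISDestroy(&iscol);
.ve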
7725: Concepts: matrices^submatrices
7727: .seealso: MatCreateSubMatrices()
7728: @*/
7729: PetscErrorCode MatCreateSubMatrix(Mat mat,IS isrow,IS iscol,MatReuse cll,Mat *newmat)
7730: {
7732: PetscMPIInt size;
7733: Mat *local;
7734: IS iscoltmp;
7743: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7744: if (cll == MAT_IGNORE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Cannot use MAT_IGNORE_MATRIX");
7746: MatCheckPreallocated(mat,1);
7747: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
7749: if (!iscol || isrow == iscol) {
7750: PetscBool stride;
7751: PetscMPIInt grabentirematrix = 0,grab;
7752: PetscObjectTypeCompare((PetscObject)isrow,ISSTRIDE,&stride);
7753: if (stride) {
7754: PetscInt first,step,n,rstart,rend;
7755: ISStrideGetInfo(isrow,&first,&step);
7756: if (step == 1) {
7757: MatGetOwnershipRange(mat,&rstart,&rend);
7758: if (rstart == first) {
7759: ISGetLocalSize(isrow,&n);
7760: if (n == rend-rstart) {
7761: grabentirematrix = 1;
7762: }
7763: }
7764: }
7765: }
7766: MPIU_Allreduce(&grabentirematrix,&grab,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
7767: if (grab) {
7768: PetscInfo(mat,"Getting entire matrix as submatrix\n");
7769: if (cll == MAT_INITIAL_MATRIX) {
7770: *newmat = mat;
7771: PetscObjectReference((PetscObject)mat);
7772: }
7773: return(0);
7774: }
7775: }
7777: if (!iscol) {
7778: ISCreateStride(PetscObjectComm((PetscObject)mat),mat->cmap->n,mat->cmap->rstart,1,&iscoltmp);
7779: } else {
7780: iscoltmp = iscol;
7781: }
7783: /* if original matrix is on just one processor then use submatrix generated */
7784: if (mat->ops->createsubmatrices && !mat->ops->createsubmatrix && size == 1 && cll == MAT_REUSE_MATRIX) {
7785: MatCreateSubMatrices(mat,1,&isrow,&iscoltmp,MAT_REUSE_MATRIX,&newmat);
7786: if (!iscol) {ISDestroy(&iscoltmp);}
7787: return(0);
7788: } else if (mat->ops->createsubmatrices && !mat->ops->createsubmatrix && size == 1) {
7789: MatCreateSubMatrices(mat,1,&isrow,&iscoltmp,MAT_INITIAL_MATRIX,&local);
7790: *newmat = *local;
7791: PetscFree(local);
7792: if (!iscol) {ISDestroy(&iscoltmp);}
7793: return(0);
7794: } else if (!mat->ops->createsubmatrix) {
7795: /* Create a new matrix type that implements the operation using the full matrix */
7796: PetscLogEventBegin(MAT_CreateSubMat,mat,0,0,0);
7797: switch (cll) {
7798: case MAT_INITIAL_MATRIX:
7799: MatCreateSubMatrixVirtual(mat,isrow,iscoltmp,newmat);
7800: break;
7801: case MAT_REUSE_MATRIX:
7802: MatSubMatrixVirtualUpdate(*newmat,mat,isrow,iscoltmp);
7803: break;
7804: default: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Invalid MatReuse, must be either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX");
7805: }
7806: PetscLogEventEnd(MAT_CreateSubMat,mat,0,0,0);
7807: if (!iscol) {ISDestroy(&iscoltmp);}
7808: return(0);
7809: }
7811: if (!mat->ops->createsubmatrix) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7812: PetscLogEventBegin(MAT_CreateSubMat,mat,0,0,0);
7813: (*mat->ops->createsubmatrix)(mat,isrow,iscoltmp,cll,newmat);
7814: PetscLogEventEnd(MAT_CreateSubMat,mat,0,0,0);
7815: if (!iscol) {ISDestroy(&iscoltmp);}
7816: if (*newmat && cll == MAT_INITIAL_MATRIX) {PetscObjectStateIncrease((PetscObject)*newmat);}
7817: return(0);
7818: }
7820: /*@
7821: MatStashSetInitialSize - sets the sizes of the matrix stash, that is
7822: used during the assembly process to store values that belong to
7823: other processors.
7825: Not Collective
7827: Input Parameters:
7828: + mat - the matrix
7829: . size - the initial size of the stash.
7830: - bsize - the initial size of the block-stash (if used).
7832: Options Database Keys:
7833: + -matstash_initial_size <size> or <size0,size1,...sizep-1>
7834: - -matstash_block_initial_size <bsize> or <bsize0,bsize1,...bsizep-1>
7836: Level: intermediate
7838: Notes:
7839: The block-stash is used for values set with MatSetValuesBlocked() while
7840: the stash is used for values set with MatSetValues()
7842: Run with the option -info and look for output of the form
7843: MatAssemblyBegin_MPIXXX:Stash has MM entries, uses nn mallocs.
7844: to determine the appropriate value, MM, to use for size and
7845: MatAssemblyBegin_MPIXXX:Block-Stash has BMM entries, uses nn mallocs.
7846: to determine the appropriate value, BMM, to use for bsize.
7848: Concepts: stash^setting matrix size
7849: Concepts: matrices^stash
7851: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashGetInfo()
7853: @*/
7854: PetscErrorCode MatStashSetInitialSize(Mat mat,PetscInt size, PetscInt bsize)
7855: {
7861: MatStashSetInitialSize_Private(&mat->stash,size);
7862: MatStashSetInitialSize_Private(&mat->bstash,bsize);
7863: return(0);
7864: }
7866: /*@
7867: MatInterpolateAdd - w = y + A*x or A'*x depending on the shape of
7868: the matrix
7870: Neighbor-wise Collective on Mat
7872: Input Parameters:
7873: + A - the matrix
7874: . x,y - the vectors
7875: - w - where the result is stored
7877: Level: intermediate
7879: Notes:
7880: w may be the same vector as y.
7882: This allows one to use either the restriction or interpolation (its transpose)
7883: matrix to do the interpolation
7885: Concepts: interpolation
7887: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()
7889: @*/
7890: PetscErrorCode MatInterpolateAdd(Mat A,Vec x,Vec y,Vec w)
7891: {
7893: PetscInt M,N,Ny;
7901: MatCheckPreallocated(A,1);
7902: MatGetSize(A,&M,&N);
7903: VecGetSize(y,&Ny);
7904: if (M == Ny) {
7905: MatMultAdd(A,x,y,w);
7906: } else {
7907: MatMultTransposeAdd(A,x,y,w);
7908: }
7909: return(0);
7910: }
7912: /*@
7913: MatInterpolate - y = A*x or A'*x depending on the shape of
7914: the matrix
7916: Neighbor-wise Collective on Mat
7918: Input Parameters:
7919: + A - the matrix
7920: - x,y - the vectors
7922: Level: intermediate
7924: Notes:
7925: This allows one to use either the restriction or interpolation (its transpose)
7926: matrix to do the interpolation
7928: Concepts: matrices^interpolation
7930: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()
7932: @*/
7933: PetscErrorCode MatInterpolate(Mat A,Vec x,Vec y)
7934: {
7936: PetscInt M,N,Ny;
7943: MatCheckPreallocated(A,1);
7944: MatGetSize(A,&M,&N);
7945: VecGetSize(y,&Ny);
7946: if (M == Ny) {
7947: MatMult(A,x,y);
7948: } else {
7949: MatMultTranspose(A,x,y);
7950: }
7951: return(0);
7952: }
7954: /*@
7955: MatRestrict - y = A*x or A'*x
7957: Neighbor-wise Collective on Mat
7959: Input Parameters:
7960: + A - the matrix
7961: - x,y - the vectors
7963: Level: intermediate
7965: Notes:
7966: This allows one to use either the restriction or interpolation (its transpose)
7967: matrix to do the restriction
7969: Concepts: matrices^restriction
7971: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatInterpolate()
7973: @*/
7974: PetscErrorCode MatRestrict(Mat A,Vec x,Vec y)
7975: {
7977: PetscInt M,N,Ny;
7984: MatCheckPreallocated(A,1);
7986: MatGetSize(A,&M,&N);
7987: VecGetSize(y,&Ny);
7988: if (M == Ny) {
7989: MatMult(A,x,y);
7990: } else {
7991: MatMultTranspose(A,x,y);
7992: }
7993: return(0);
7994: }
7996: /*@C
7997: MatGetNullSpace - retrieves the null space attached to a matrix.
7999: Logically Collective on Mat and MatNullSpace
8001: Input Parameter:
8002: . mat - the matrix
8003: Output Parameter:
. nullsp - the null space object, NULL if none has been set
8005: Level: developer
8007: Concepts: null space^attaching to matrix
8009: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetNullSpace()
8010: @*/
8011: PetscErrorCode MatGetNullSpace(Mat mat, MatNullSpace *nullsp)
8012: {
8016: *nullsp = mat->nullsp;
8017: return(0);
8018: }
8020: /*@C
8021: MatSetNullSpace - attaches a null space to a matrix.
8023: Logically Collective on Mat and MatNullSpace
8025: Input Parameters:
8026: + mat - the matrix
8027: - nullsp - the null space object
8029: Level: advanced
8031: Notes:
8032: This null space is used by the linear solvers. Overwrites any previous null space that may have been attached
8034: For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) you also likely should
8035: call MatSetTransposeNullSpace(). This allows the linear system to be solved in a least squares sense.
8037: You can remove the null space by calling this routine with a nullsp of NULL
8040: The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
8041: for a matrix A mapping R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
8042: Similarly, R^m is the direct sum of n(A^T) and R(A). Hence the linear system A x = b has a solution only if b is in R(A) (or, equivalently, b is orthogonal to
8043: n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution,
8044: the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b}, where \hat{b} is b orthogonalized against n(A^T).
8046: Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().
8048: If the matrix is known to be symmetric, because it is an SBAIJ matrix or because one has called MatSetOption(mat,MAT_SYMMETRIC,PETSC_TRUE) (or MAT_SYMMETRIC_ETERNAL), this
8049: routine also automatically calls MatSetTransposeNullSpace().
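 Example of Usage (a minimal sketch attaching the constant null space, as arises for example for pure
 Neumann problems):
.vb
      MatNullSpace nullsp;

      MatNullSpaceCreate(PetscObjectComm((PetscObject)mat),PETSC_TRUE,0,NULL,&nullsp);
      MatSetNullSpace(mat,nullsp);
      MatNullSpaceDestroy(&nullsp);
.ve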
8051: Concepts: null space^attaching to matrix
8053: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetTransposeNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
8054: @*/
8055: PetscErrorCode MatSetNullSpace(Mat mat,MatNullSpace nullsp)
8056: {
8062: if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8063: MatNullSpaceDestroy(&mat->nullsp);
8064: mat->nullsp = nullsp;
8065: if (mat->symmetric_set && mat->symmetric) {
8066: MatSetTransposeNullSpace(mat,nullsp);
8067: }
8068: return(0);
8069: }
8071: /*@
8072: MatGetTransposeNullSpace - retrieves the null space of the transpose of a matrix.
8074: Logically Collective on Mat and MatNullSpace
8076: Input Parameter:
8077: . mat - the matrix
8078: Output Parameter:
. nullsp - the null space object of the transpose, NULL if none has been set
8080: Level: developer
8082: Concepts: null space^attaching to matrix
8084: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetTransposeNullSpace(), MatSetNullSpace(), MatGetNullSpace()
8085: @*/
8086: PetscErrorCode MatGetTransposeNullSpace(Mat mat, MatNullSpace *nullsp)
8087: {
8092: *nullsp = mat->transnullsp;
8093: return(0);
8094: }
8096: /*@
8097: MatSetTransposeNullSpace - attaches a null space to a matrix.
8099: Logically Collective on Mat and MatNullSpace
8101: Input Parameters:
8102: + mat - the matrix
8103: - nullsp - the null space object
8105: Level: advanced
8107: Notes:
8108: For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) this allows the linear system to be solved in a least squares sense.
8109: You must also call MatSetNullSpace()
8112: The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
8113: for a matrix A mapping R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
8114: Similarly, R^m is the direct sum of n(A^T) and R(A). Hence the linear system A x = b has a solution only if b is in R(A) (or, equivalently, b is orthogonal to
8115: n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution,
8116: the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b}, where \hat{b} is b orthogonalized against n(A^T).
8118: Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().
8120: Concepts: null space^attaching to matrix
8122: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
8123: @*/
8124: PetscErrorCode MatSetTransposeNullSpace(Mat mat,MatNullSpace nullsp)
8125: {
8132: MatCheckPreallocated(mat,1);
8133: PetscObjectReference((PetscObject)nullsp);
8134: MatNullSpaceDestroy(&mat->transnullsp);
8135: mat->transnullsp = nullsp;
8136: return(0);
8137: }
8139: /*@
8140: MatSetNearNullSpace - attaches a null space to a matrix, which is often the null space (rigid body modes) of the operator without boundary conditions.
8141: This null space will be used to provide near null space vectors to a multigrid preconditioner built from this matrix.
8143: Logically Collective on Mat and MatNullSpace
8145: Input Parameters:
8146: + mat - the matrix
8147: - nullsp - the null space object
8149: Level: advanced
8151: Notes:
8152: Overwrites any previous near null space that may have been attached
8154: You can remove the null space by calling this routine with a nullsp of NULL
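 Example of Usage (an illustrative sketch for 3D elasticity; coords is assumed to be a Vec holding the
 nodal coordinates, with block size 3):
.vb
      MatNullSpace nearnull;

      MatNullSpaceCreateRigidBody(coords,&nearnull);
      MatSetNearNullSpace(mat,nearnull);
      MatNullSpaceDestroy(&nearnull);
.ve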
8156: Concepts: null space^attaching to matrix
8158: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNullSpace(), MatNullSpaceCreateRigidBody(), MatGetNearNullSpace()
8159: @*/
8160: PetscErrorCode MatSetNearNullSpace(Mat mat,MatNullSpace nullsp)
8161: {
8168: MatCheckPreallocated(mat,1);
8169: if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8170: MatNullSpaceDestroy(&mat->nearnullsp);
8171: mat->nearnullsp = nullsp;
8172: return(0);
8173: }
8175: /*@
8176: MatGetNearNullSpace - Get the near null space attached with MatSetNearNullSpace()
8178: Not Collective
8180: Input Parameters:
8181: . mat - the matrix
8183: Output Parameters:
8184: . nullsp - the null space object, NULL if not set
8186: Level: developer
8188: Concepts: null space^attaching to matrix
8190: .seealso: MatSetNearNullSpace(), MatGetNullSpace(), MatNullSpaceCreate()
8191: @*/
8192: PetscErrorCode MatGetNearNullSpace(Mat mat,MatNullSpace *nullsp)
8193: {
8198: MatCheckPreallocated(mat,1);
8199: *nullsp = mat->nearnullsp;
8200: return(0);
8201: }
8203: /*@C
8204: MatICCFactor - Performs in-place incomplete Cholesky factorization of matrix.
8206: Collective on Mat
8208: Input Parameters:
8209: + mat - the matrix
8210: . row - row/column permutation
8211: - info - structure containing
8212: $      fill - expected fill factor >= 1.0
$      level - level of fill, for ICC(k)
8214: Notes:
8215: Probably really in-place only when level of fill is zero, otherwise allocates
8216: new space to store factored matrix and deletes previous memory.
8218: Most users should employ the simplified KSP interface for linear solvers
8219: instead of working directly with matrix algebra routines such as this.
8220: See, e.g., KSPCreate().
8222: Level: developer
8224: Concepts: matrices^incomplete Cholesky factorization
8225: Concepts: Cholesky factorization
8227: .seealso: MatICCFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
8229: Developer Note: fortran interface is not autogenerated as the f90
8230: interface definition cannot be generated correctly [due to MatFactorInfo]
8232: @*/
8233: PetscErrorCode MatICCFactor(Mat mat,IS row,const MatFactorInfo *info)
8234: {
8242: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
8243: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8244: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8245: if (!mat->ops->iccfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8246: MatCheckPreallocated(mat,1);
8247: (*mat->ops->iccfactor)(mat,row,info);
8248: PetscObjectStateIncrease((PetscObject)mat);
8249: return(0);
8250: }
8252: /*@
8253: MatDiagonalScaleLocal - Scales columns of a matrix given the scaling values including the
8254: ghosted ones.
8256: Not Collective
8258: Input Parameters:
8259: + mat - the matrix
8260: - diag - the diagonal values, including ghost ones
8262: Level: developer
8264: Notes: Works only for MPIAIJ and MPIBAIJ matrices
8266: .seealso: MatDiagonalScale()
8267: @*/
8268: PetscErrorCode MatDiagonalScaleLocal(Mat mat,Vec diag)
8269: {
8271: PetscMPIInt size;
8278: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must be already assembled");
8279: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
8280: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
8281: if (size == 1) {
8282: PetscInt n,m;
8283: VecGetSize(diag,&n);
8284: MatGetSize(mat,0,&m);
8285: if (m == n) {
8286: MatDiagonalScale(mat,0,diag);
8287: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only supported for sequential matrices when no ghost points/periodic conditions");
8288: } else {
8289: PetscUseMethod(mat,"MatDiagonalScaleLocal_C",(Mat,Vec),(mat,diag));
8290: }
8291: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
8292: PetscObjectStateIncrease((PetscObject)mat);
8293: return(0);
8294: }
8296: /*@
8297: MatGetInertia - Gets the inertia from a factored matrix
8299: Collective on Mat
8301: Input Parameter:
8302: . mat - the matrix
8304: Output Parameters:
8305: + nneg - number of negative eigenvalues
8306: . nzero - number of zero eigenvalues
8307: - npos - number of positive eigenvalues
8309: Level: advanced
8311: Notes: Matrix must have been factored by MatCholeskyFactor()
8314: @*/
8315: PetscErrorCode MatGetInertia(Mat mat,PetscInt *nneg,PetscInt *nzero,PetscInt *npos)
8316: {
8322: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8323: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Numeric factor mat is not assembled");
8324: if (!mat->ops->getinertia) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8325: (*mat->ops->getinertia)(mat,nneg,nzero,npos);
8326: return(0);
8327: }
8329: /* ----------------------------------------------------------------*/
8330: /*@C
8331: MatSolves - Solves A x = b, given a factored matrix, for a collection of vectors
8333: Neighbor-wise Collective on Mat and Vecs
8335: Input Parameters:
8336: + mat - the factored matrix
8337: - b - the right-hand-side vectors
8339: Output Parameter:
8340: . x - the result vectors
8342: Notes:
8343: The vectors b and x cannot be the same. I.e., one cannot
8344: call MatSolves(A,x,x).
8347: Most users should employ the simplified KSP interface for linear solvers
8348: instead of working directly with matrix algebra routines such as this.
8349: See, e.g., KSPCreate().
8351: Level: developer
8353: Concepts: matrices^triangular solves
8355: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd(), MatSolve()
8356: @*/
8357: PetscErrorCode MatSolves(Mat mat,Vecs b,Vecs x)
8358: {
8364: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
8365: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8366: if (!mat->rmap->N && !mat->cmap->N) return(0);
8368: if (!mat->ops->solves) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8369: MatCheckPreallocated(mat,1);
8370: PetscLogEventBegin(MAT_Solves,mat,0,0,0);
8371: (*mat->ops->solves)(mat,b,x);
8372: PetscLogEventEnd(MAT_Solves,mat,0,0,0);
8373: return(0);
8374: }
8376: /*@
8377: MatIsSymmetric - Test whether a matrix is symmetric
8379: Collective on Mat
8381: Input Parameters:
8382: + A - the matrix to test
8383: - tol - tolerance; entries of A and its transpose that differ by less than this amount are considered equal (use 0.0 for an exact test)
8385: Output Parameter:
8386: . flg - the result
8388: Notes: For real numbers MatIsSymmetric() and MatIsHermitian() return identical results
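   Example of Usage (a minimal sketch; A is a placeholder name for an assembled Mat):
.vb
      PetscBool symm;
      MatIsSymmetric(A,1.e-10,&symm);   /* accept round-off sized differences */
      if (!symm) { /* e.g., fall back to a nonsymmetric solver */ }
.ve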
8390: Level: intermediate
8392: Concepts: matrix^symmetry
8394: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetricKnown()
8395: @*/
8396: PetscErrorCode MatIsSymmetric(Mat A,PetscReal tol,PetscBool *flg)
8397: {
8404: if (!A->symmetric_set) {
8405: if (!A->ops->issymmetric) {
8406: MatType mattype;
8407: MatGetType(A,&mattype);
8408: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetry",mattype);
8409: }
8410: (*A->ops->issymmetric)(A,tol,flg);
8411: if (!tol) {
8412: A->symmetric_set = PETSC_TRUE;
8413: A->symmetric = *flg;
8414: if (A->symmetric) {
8415: A->structurally_symmetric_set = PETSC_TRUE;
8416: A->structurally_symmetric = PETSC_TRUE;
8417: }
8418: }
8419: } else if (A->symmetric) {
8420: *flg = PETSC_TRUE;
8421: } else if (!tol) {
8422: *flg = PETSC_FALSE;
8423: } else {
8424: if (!A->ops->issymmetric) {
8425: MatType mattype;
8426: MatGetType(A,&mattype);
8427: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetry",mattype);
8428: }
8429: (*A->ops->issymmetric)(A,tol,flg);
8430: }
8431: return(0);
8432: }
8434: /*@
8435: MatIsHermitian - Test whether a matrix is Hermitian
8437: Collective on Mat
8439: Input Parameters:
8440: + A - the matrix to test
8441: - tol - tolerance; entries of A and its conjugate transpose that differ by less than this amount are considered equal (use 0.0 for an exact test)
8443: Output Parameter:
8444: . flg - the result
8446: Level: intermediate
8448: Concepts: matrix^symmetry
8450: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(),
8451: MatIsSymmetricKnown(), MatIsSymmetric()
8452: @*/
8453: PetscErrorCode MatIsHermitian(Mat A,PetscReal tol,PetscBool *flg)
8454: {
8461: if (!A->hermitian_set) {
8462: if (!A->ops->ishermitian) {
8463: MatType mattype;
8464: MatGetType(A,&mattype);
8465: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for being Hermitian",mattype);
8466: }
8467: (*A->ops->ishermitian)(A,tol,flg);
8468: if (!tol) {
8469: A->hermitian_set = PETSC_TRUE;
8470: A->hermitian = *flg;
8471: if (A->hermitian) {
8472: A->structurally_symmetric_set = PETSC_TRUE;
8473: A->structurally_symmetric = PETSC_TRUE;
8474: }
8475: }
8476: } else if (A->hermitian) {
8477: *flg = PETSC_TRUE;
8478: } else if (!tol) {
8479: *flg = PETSC_FALSE;
8480: } else {
8481: if (!A->ops->ishermitian) {
8482: MatType mattype;
8483: MatGetType(A,&mattype);
8484: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for being Hermitian",mattype);
8485: }
8486: (*A->ops->ishermitian)(A,tol,flg);
8487: }
8488: return(0);
8489: }
8491: /*@
8492: MatIsSymmetricKnown - Checks the flag on the matrix to see if it is symmetric.
8494: Not Collective
8496: Input Parameter:
8497: . A - the matrix to check
8499: Output Parameters:
8500: + set - if the symmetric flag is set (this tells you if the next flag is valid)
8501: - flg - the result
8503: Level: advanced
8505: Concepts: matrix^symmetry
8507: Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsSymmetric()
8508: if you want it explicitly checked
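   Example of Usage (a minimal sketch; A is a placeholder name for a Mat):
.vb
      PetscBool set,symm;
      MatIsSymmetricKnown(A,&set,&symm);
      if (!set) { MatIsSymmetric(A,0.0,&symm); }   /* flag not set, check explicitly */
.ve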
8510: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8511: @*/
8512: PetscErrorCode MatIsSymmetricKnown(Mat A,PetscBool *set,PetscBool *flg)
8513: {
8518: if (A->symmetric_set) {
8519: *set = PETSC_TRUE;
8520: *flg = A->symmetric;
8521: } else {
8522: *set = PETSC_FALSE;
8523: }
8524: return(0);
8525: }
8527: /*@
8528: MatIsHermitianKnown - Checks the flag on the matrix to see if it is Hermitian.
8530: Not Collective
8532: Input Parameter:
8533: . A - the matrix to check
8535: Output Parameters:
8536: + set - if the hermitian flag is set (this tells you if the next flag is valid)
8537: - flg - the result
8539: Level: advanced
8541: Concepts: matrix^symmetry
8543: Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsHermitian()
8544: if you want it explicitly checked
8546: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8547: @*/
8548: PetscErrorCode MatIsHermitianKnown(Mat A,PetscBool *set,PetscBool *flg)
8549: {
8554: if (A->hermitian_set) {
8555: *set = PETSC_TRUE;
8556: *flg = A->hermitian;
8557: } else {
8558: *set = PETSC_FALSE;
8559: }
8560: return(0);
8561: }
8563: /*@
8564: MatIsStructurallySymmetric - Test whether a matrix is structurally symmetric
8566: Collective on Mat
8568: Input Parameter:
8569: . A - the matrix to test
8571: Output Parameter:
8572: . flg - the result
8574: Level: intermediate
8576: Concepts: matrix^symmetry
8578: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsSymmetric(), MatSetOption()
8579: @*/
8580: PetscErrorCode MatIsStructurallySymmetric(Mat A,PetscBool *flg)
8581: {
8587: if (!A->structurally_symmetric_set) {
8588: if (!A->ops->isstructurallysymmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix does not support checking for structural symmetry");
8589: (*A->ops->isstructurallysymmetric)(A,&A->structurally_symmetric);
8591: A->structurally_symmetric_set = PETSC_TRUE;
8592: }
8593: *flg = A->structurally_symmetric;
8594: return(0);
8595: }
8597: /*@
8598: MatStashGetInfo - Gets how many values are currently in the matrix stash, i.e. values that need
8599: to be communicated to other processors during the MatAssemblyBegin()/MatAssemblyEnd() process
8601: Not collective
8603: Input Parameter:
8604: . mat - the matrix
8606: Output Parameters:
8607: + nstash - the size of the stash
8608: . reallocs - the number of additional mallocs incurred
8609: . bnstash - the size of the block stash
8610: - breallocs - the number of additional mallocs incurred in the block stash
8612: Level: advanced
8614: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashSetInitialSize()
8616: @*/
8617: PetscErrorCode MatStashGetInfo(Mat mat,PetscInt *nstash,PetscInt *reallocs,PetscInt *bnstash,PetscInt *breallocs)
8618: {
8622: MatStashGetInfo_Private(&mat->stash,nstash,reallocs);
8623: MatStashGetInfo_Private(&mat->bstash,bnstash,breallocs);
8624: return(0);
8625: }
8627: /*@C
8628: MatCreateVecs - Get vector(s) compatible with the matrix, i.e. with the same
8629: parallel layout
8631: Collective on Mat
8633: Input Parameter:
8634: . mat - the matrix
8636: Output Parameters:
8637: + right - (optional) vector that the matrix can be multiplied against
8638: - left - (optional) vector that the matrix vector product can be stored in
8640: Notes:
8641: The blocksize of the returned vectors is determined by the row and column block sizes set with MatSetBlockSizes() or the single blocksize (same for both) set by MatSetBlockSize().
8643: These are new vectors that are not owned by the Mat; they should be destroyed with VecDestroy() when no longer needed
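   Example of Usage (a minimal sketch; A is a placeholder name for a Mat whose layouts have been set):
.vb
      Vec x,b;
      MatCreateVecs(A,&x,&b);   /* x is compatible with A*x, b can hold the result */
      /* ... use x and b ... */
      VecDestroy(&x);
      VecDestroy(&b);
.ve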
8645: Level: advanced
8647: .seealso: MatCreate(), VecDestroy()
8648: @*/
8649: PetscErrorCode MatCreateVecs(Mat mat,Vec *right,Vec *left)
8650: {
8656: if (mat->ops->getvecs) {
8657: (*mat->ops->getvecs)(mat,right,left);
8658: } else {
8659: PetscInt rbs,cbs;
8660: MatGetBlockSizes(mat,&rbs,&cbs);
8661: if (right) {
8662: if (mat->cmap->n < 0) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"PetscLayout for columns not yet setup");
8663: VecCreate(PetscObjectComm((PetscObject)mat),right);
8664: VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);
8665: VecSetBlockSize(*right,cbs);
8666: VecSetType(*right,VECSTANDARD);
8667: PetscLayoutReference(mat->cmap,&(*right)->map);
8668: }
8669: if (left) {
8670: if (mat->rmap->n < 0) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"PetscLayout for rows not yet setup");
8671: VecCreate(PetscObjectComm((PetscObject)mat),left);
8672: VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);
8673: VecSetBlockSize(*left,rbs);
8674: VecSetType(*left,VECSTANDARD);
8675: PetscLayoutReference(mat->rmap,&(*left)->map);
8676: }
8677: }
8678: return(0);
8679: }
8681: /*@C
8682: MatFactorInfoInitialize - Initializes a MatFactorInfo data structure
8683: with default values.
8685: Not Collective
8687: Input Parameter:
8688: . info - the MatFactorInfo data structure
8691: Notes: The solvers are generally used through the KSP and PC objects, for example
8692: PCLU, PCILU, PCCHOLESKY, PCICC
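   Example of Usage (a minimal sketch; the fill value chosen here is only an illustration):
.vb
      MatFactorInfo info;
      MatFactorInfoInitialize(&info);
      info.fill = 2.0;          /* then pass &info to, e.g., MatLUFactorSymbolic() */
.ve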
8694: Level: developer
8696: .seealso: MatFactorInfo
8698: Developer Note: the Fortran interface is not autogenerated as the F90
8699: interface definition cannot be generated correctly [due to MatFactorInfo]
8701: @*/
8703: PetscErrorCode MatFactorInfoInitialize(MatFactorInfo *info)
8704: {
8708: PetscMemzero(info,sizeof(MatFactorInfo));
8709: return(0);
8710: }
8712: /*@
8713: MatFactorSetSchurIS - Set indices corresponding to the Schur complement you wish to have computed
8715: Collective on Mat
8717: Input Parameters:
8718: + mat - the factored matrix
8719: - is - the index set defining the Schur indices (0-based)
8721: Notes: Call MatFactorSolveSchurComplement() or MatFactorSolveSchurComplementTranspose() after this call to solve a Schur complement system.
8723: You can call MatFactorGetSchurComplement() or MatFactorCreateSchurComplement() after this call.
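   Example of Usage (an illustrative sketch; F is assumed to come from MatGetFactor() with a solver that supports Schur complements, e.g. MATSOLVERMUMPS, and nschur/first are placeholder values):
.vb
      IS is;
      ISCreateStride(PETSC_COMM_SELF,nschur,first,1,&is);
      MatFactorSetSchurIS(F,is);
      ISDestroy(&is);
      /* ... perform the symbolic and numeric factorization of F ... */
.ve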
8725: Level: developer
8727: Concepts:
8729: .seealso: MatGetFactor(), MatFactorGetSchurComplement(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSolveSchurComplement(),
8730: MatFactorSolveSchurComplementTranspose(), MatFactorSolveSchurComplement()
8732: @*/
8733: PetscErrorCode MatFactorSetSchurIS(Mat mat,IS is)
8734: {
8735: PetscErrorCode ierr,(*f)(Mat,IS);
8743: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
8744: PetscObjectQueryFunction((PetscObject)mat,"MatFactorSetSchurIS_C",&f);
8745: if (!f) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"The selected MatSolverType does not support Schur complement computation. You should use MATSOLVERMUMPS or MATSOLVERMKL_PARDISO");
8746: if (mat->schur) {
8747: MatDestroy(&mat->schur);
8748: }
8749: (*f)(mat,is);
8750: if (!mat->schur) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_PLIB,"Schur complement has not been created");
8751: MatFactorSetUpInPlaceSchur_Private(mat);
8752: return(0);
8753: }
8755: /*@
8756: MatFactorCreateSchurComplement - Create a Schur complement matrix object using Schur data computed during the factorization step
8758: Logically Collective on Mat
8760: Input Parameters:
8761: + F - the factored matrix obtained by calling MatGetFactor() from PETSc-MUMPS interface
8762: . S - location where to return the Schur complement, can be NULL
8763: - status - the status of the Schur complement matrix, can be NULL
8765: Notes:
8766: You must call MatFactorSetSchurIS() before calling this routine.
8768: The routine provides a copy of the Schur matrix stored within the solver data structures.
8769: The caller must destroy the object when it is no longer needed.
8770: If MatFactorInvertSchurComplement() has been called, the routine gets back the inverse.
8772: Use MatFactorGetSchurComplement() to get access to the Schur complement matrix inside the factored matrix instead of making a copy of it (which this function does)
8774: Developer Notes: The reason this routine exists is that the representation of the Schur complement within the factor matrix may differ from a standard PETSc
8775: matrix representation, and we normally do not want to spend the time or memory to make a copy as a regular PETSc matrix.
8777: See MatCreateSchurComplement() or MatGetSchurComplement() for ways to create virtual or approximate Schur complements.
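   Example of Usage (a minimal sketch; F is a placeholder name for a factored matrix on which MatFactorSetSchurIS() was called before factorization):
.vb
      Mat S;
      MatFactorCreateSchurComplement(F,&S,NULL);
      /* ... use S as a regular PETSc matrix ... */
      MatDestroy(&S);
.ve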
8779: Level: advanced
8781: References:
8783: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorGetSchurComplement(), MatFactorSchurStatus
8784: @*/
8785: PetscErrorCode MatFactorCreateSchurComplement(Mat F,Mat* S,MatFactorSchurStatus* status)
8786: {
8793: if (S) {
8794: PetscErrorCode (*f)(Mat,Mat*);
8796: PetscObjectQueryFunction((PetscObject)F,"MatFactorCreateSchurComplement_C",&f);
8797: if (f) {
8798: (*f)(F,S);
8799: } else {
8800: MatDuplicate(F->schur,MAT_COPY_VALUES,S);
8801: }
8802: }
8803: if (status) *status = F->schur_status;
8804: return(0);
8805: }
8807: /*@
8808: MatFactorGetSchurComplement - Gets access to a Schur complement matrix using the current Schur data within a factored matrix
8810: Logically Collective on Mat
8812: Input Parameters:
8813: + F - the factored matrix obtained by calling MatGetFactor()
8814: . *S - location where to return the Schur complement, can be NULL
8815: - status - the status of the Schur complement matrix, can be NULL
8817: Notes:
8818: You must call MatFactorSetSchurIS() before calling this routine.
8820: Schur complement mode is currently implemented for sequential matrices.
8821: The routine returns the Schur complement stored within the data structures of the solver.
8822: If MatFactorInvertSchurComplement() has previously been called, the returned matrix is actually the inverse of the Schur complement.
8823: The returned matrix should not be destroyed; the caller should call MatFactorRestoreSchurComplement() when the object is no longer needed.
8825: Use MatFactorCreateSchurComplement() to create a copy of the Schur complement matrix that is within a factored matrix
8827: See MatCreateSchurComplement() or MatGetSchurComplement() for ways to create virtual or approximate Schur complements.
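   Example of Usage (a minimal sketch; F is a placeholder name for a factored matrix on which MatFactorSetSchurIS() was called):
.vb
      Mat                  S;
      MatFactorSchurStatus status;
      MatFactorGetSchurComplement(F,&S,&status);
      /* ... inspect S without destroying it ... */
      MatFactorRestoreSchurComplement(F,&S,status);
.ve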
8829: Level: advanced
8831: References:
8833: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSchurStatus
8834: @*/
8835: PetscErrorCode MatFactorGetSchurComplement(Mat F,Mat* S,MatFactorSchurStatus* status)
8836: {
8841: if (S) *S = F->schur;
8842: if (status) *status = F->schur_status;
8843: return(0);
8844: }
8846: /*@
8847: MatFactorRestoreSchurComplement - Restore the Schur complement matrix object obtained from a call to MatFactorGetSchurComplement
8849: Logically Collective on Mat
8851: Input Parameters:
8852: + F - the factored matrix obtained by calling MatGetFactor()
8853: . *S - location where the Schur complement is stored
8854: - status - the status of the Schur complement matrix (see MatFactorSchurStatus)
8856: Notes:
8858: Level: advanced
8860: References:
8862: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSchurStatus
8863: @*/
8864: PetscErrorCode MatFactorRestoreSchurComplement(Mat F,Mat* S,MatFactorSchurStatus status)
8865: {
8870: if (S) {
8872: *S = NULL;
8873: }
8874: F->schur_status = status;
8875: MatFactorUpdateSchurStatus_Private(F);
8876: return(0);
8877: }
8879: /*@
8880: MatFactorSolveSchurComplementTranspose - Solve the transpose of the Schur complement system computed during the factorization step
8882: Logically Collective on Mat
8884: Input Parameters:
8885: + F - the factored matrix obtained by calling MatGetFactor()
8886: . rhs - location where the right hand side of the Schur complement system is stored
8887: - sol - location where the solution of the Schur complement system has to be returned
8889: Notes:
8890: The sizes of the vectors should match the size of the Schur complement
8892: Must be called after MatFactorSetSchurIS()
8894: Level: advanced
8896: References:
8898: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorSolveSchurComplement()
8899: @*/
8900: PetscErrorCode MatFactorSolveSchurComplementTranspose(Mat F, Vec rhs, Vec sol)
8901: {
8913: MatFactorFactorizeSchurComplement(F);
8914: switch (F->schur_status) {
8915: case MAT_FACTOR_SCHUR_FACTORED:
8916: MatSolveTranspose(F->schur,rhs,sol);
8917: break;
8918: case MAT_FACTOR_SCHUR_INVERTED:
8919: MatMultTranspose(F->schur,rhs,sol);
8920: break;
8921: default:
8922: SETERRQ1(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %D",F->schur_status);
8923: break;
8924: }
8925: return(0);
8926: }
8928: /*@
8929: MatFactorSolveSchurComplement - Solve the Schur complement system computed during the factorization step
8931: Logically Collective on Mat
8933: Input Parameters:
8934: + F - the factored matrix obtained by calling MatGetFactor()
8935: . rhs - location where the right hand side of the Schur complement system is stored
8936: - sol - location where the solution of the Schur complement system has to be returned
8938: Notes:
8939: The sizes of the vectors should match the size of the Schur complement
8941: Must be called after MatFactorSetSchurIS()
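   Example of Usage (an illustrative sketch; F, rhs and sol are placeholder names, with rhs and sol sized like the Schur complement, e.g. via MatCreateVecs() on a matrix from MatFactorCreateSchurComplement()):
.vb
      MatFactorSolveSchurComplement(F,rhs,sol);             /* solve S sol = rhs   */
      MatFactorSolveSchurComplementTranspose(F,rhs,sol);    /* solve S^T sol = rhs */
.ve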
8943: Level: advanced
8945: References:
8947: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorSolveSchurComplementTranspose()
8948: @*/
8949: PetscErrorCode MatFactorSolveSchurComplement(Mat F, Vec rhs, Vec sol)
8950: {
8962: MatFactorFactorizeSchurComplement(F);
8963: switch (F->schur_status) {
8964: case MAT_FACTOR_SCHUR_FACTORED:
8965: MatSolve(F->schur,rhs,sol);
8966: break;
8967: case MAT_FACTOR_SCHUR_INVERTED:
8968: MatMult(F->schur,rhs,sol);
8969: break;
8970: default:
8971: SETERRQ1(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %D",F->schur_status);
8972: break;
8973: }
8974: return(0);
8975: }
8977: /*@
8978: MatFactorInvertSchurComplement - Invert the Schur complement matrix computed during the factorization step
8980: Logically Collective on Mat
8982: Input Parameter:
8983: . F - the factored matrix obtained by calling MatGetFactor()
8985: Notes: Must be called after MatFactorSetSchurIS().
8987: Call MatFactorGetSchurComplement() or MatFactorCreateSchurComplement() AFTER this call to actually compute the inverse and get access to it.
8989: Level: advanced
8991: References:
8993: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorGetSchurComplement(), MatFactorCreateSchurComplement()
8994: @*/
8995: PetscErrorCode MatFactorInvertSchurComplement(Mat F)
8996: {
9002: if (F->schur_status == MAT_FACTOR_SCHUR_INVERTED) return(0);
9003: MatFactorFactorizeSchurComplement(F);
9004: MatFactorInvertSchurComplement_Private(F);
9005: F->schur_status = MAT_FACTOR_SCHUR_INVERTED;
9006: return(0);
9007: }
9009: /*@
9010: MatFactorFactorizeSchurComplement - Factorize the Schur complement matrix computed during the factorization step
9012: Logically Collective on Mat
9014: Input Parameter:
9015: . F - the factored matrix obtained by calling MatGetFactor()
9017: Notes: Must be called after MatFactorSetSchurIS().
9019: Level: advanced
9021: References:
9023: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorInvertSchurComplement()
9024: @*/
9025: PetscErrorCode MatFactorFactorizeSchurComplement(Mat F)
9026: {
9032: if (F->schur_status == MAT_FACTOR_SCHUR_INVERTED || F->schur_status == MAT_FACTOR_SCHUR_FACTORED) return(0);
9033: MatFactorFactorizeSchurComplement_Private(F);
9034: F->schur_status = MAT_FACTOR_SCHUR_FACTORED;
9035: return(0);
9036: }
9038: /*@
9039: MatPtAP - Creates the matrix product C = P^T * A * P
9041: Neighbor-wise Collective on Mat
9043: Input Parameters:
9044: + A - the matrix
9045: . P - the projection matrix
9046: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9047: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P)), use PETSC_DEFAULT if you do not have a good estimate
9048: if the result is a dense matrix this is irrelevant
9050: Output Parameters:
9051: . C - the product matrix
9053: Notes:
9054: C will be created and must be destroyed by the user with MatDestroy().
9056: This routine is currently only implemented for pairs of sequential dense matrices, AIJ matrices and classes
9057: which inherit from AIJ.
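   Example of Usage (a minimal sketch of forming a Galerkin coarse operator; A and P are placeholder names for assembled, dimensionally compatible matrices):
.vb
      Mat Ac;
      MatPtAP(A,P,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&Ac);
      /* ... the values of A change but not its nonzero pattern ... */
      MatPtAP(A,P,MAT_REUSE_MATRIX,PETSC_DEFAULT,&Ac);
      MatDestroy(&Ac);
.ve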
9059: Level: intermediate
9061: .seealso: MatPtAPSymbolic(), MatPtAPNumeric(), MatMatMult(), MatRARt()
9062: @*/
9063: PetscErrorCode MatPtAP(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
9064: {
9066: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9067: PetscErrorCode (*fP)(Mat,Mat,MatReuse,PetscReal,Mat*);
9068: PetscErrorCode (*ptap)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9073: MatCheckPreallocated(A,1);
9074: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9075: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9076: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9079: MatCheckPreallocated(P,2);
9080: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9081: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9083: if (A->rmap->N != A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix A must be square, %D != %D",A->rmap->N,A->cmap->N);
9084: if (P->rmap->N != A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9085: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9086: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9088: if (scall == MAT_REUSE_MATRIX) {
9092: PetscLogEventBegin(MAT_PtAP,A,P,0,0);
9093: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
9094: (*(*C)->ops->ptapnumeric)(A,P,*C);
9095: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
9096: PetscLogEventEnd(MAT_PtAP,A,P,0,0);
9097: return(0);
9098: }
9100: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9101: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9103: fA = A->ops->ptap;
9104: fP = P->ops->ptap;
9105: if (fP == fA) {
9106: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatPtAP not supported for A of type %s",((PetscObject)A)->type_name);
9107: ptap = fA;
9108: } else {
9109: /* dispatch based on the type of A and P from their PetscObject's PetscFunctionLists. */
9110: char ptapname[256];
9111: PetscStrncpy(ptapname,"MatPtAP_",sizeof(ptapname));
9112: PetscStrlcat(ptapname,((PetscObject)A)->type_name,sizeof(ptapname));
9113: PetscStrlcat(ptapname,"_",sizeof(ptapname));
9114: PetscStrlcat(ptapname,((PetscObject)P)->type_name,sizeof(ptapname));
9115: PetscStrlcat(ptapname,"_C",sizeof(ptapname)); /* e.g., ptapname = "MatPtAP_seqdense_seqaij_C" */
9116: PetscObjectQueryFunction((PetscObject)P,ptapname,&ptap);
9117: if (!ptap) SETERRQ3(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatPtAP requires A, %s, to be compatible with P, %s (Misses composed function %s)",((PetscObject)A)->type_name,((PetscObject)P)->type_name,ptapname);
9118: }
9120: PetscLogEventBegin(MAT_PtAP,A,P,0,0);
9121: (*ptap)(A,P,scall,fill,C);
9122: PetscLogEventEnd(MAT_PtAP,A,P,0,0);
9123: return(0);
9124: }
9126: /*@
9127: MatPtAPNumeric - Computes the matrix product C = P^T * A * P
9129: Neighbor-wise Collective on Mat
9131: Input Parameters:
9132: + A - the matrix
9133: - P - the projection matrix
9135: Output Parameters:
9136: . C - the product matrix
9138: Notes:
9139: C must have been created by calling MatPtAPSymbolic() and must be destroyed by
9140: the user using MatDestroy().
9142: This routine is currently only implemented for pairs of AIJ matrices and classes
9143: which inherit from AIJ. C will be of type MATAIJ.
9145: Level: intermediate
9147: .seealso: MatPtAP(), MatPtAPSymbolic(), MatMatMultNumeric()
9148: @*/
9149: PetscErrorCode MatPtAPNumeric(Mat A,Mat P,Mat C)
9150: {
9156: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9157: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9160: MatCheckPreallocated(P,2);
9161: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9162: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9165: MatCheckPreallocated(C,3);
9166: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9167: if (P->cmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->rmap->N);
9168: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9169: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9170: if (P->cmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->cmap->N);
9171: MatCheckPreallocated(A,1);
9173: if (!C->ops->ptapnumeric) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"MatPtAPNumeric implementation is missing. You should call MatPtAPSymbolic first");
9174: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
9175: (*C->ops->ptapnumeric)(A,P,C);
9176: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
9177: return(0);
9178: }
9180: /*@
9181: MatPtAPSymbolic - Creates the (i,j) structure of the matrix product C = P^T * A * P
9183: Neighbor-wise Collective on Mat
9185: Input Parameters:
9186: + A - the matrix
9187: - P - the projection matrix
9189: Output Parameters:
9190: . C - the (i,j) structure of the product matrix
9192: Notes:
9193: C will be created and must be destroyed by the user with MatDestroy().
9195: This routine is currently only implemented for pairs of SeqAIJ matrices and classes
9196: which inherit from SeqAIJ. C will be of type MATSEQAIJ. The product is computed using
9197: this (i,j) structure by calling MatPtAPNumeric().
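   Example of Usage (a minimal sketch of the symbolic/numeric split; A and P are placeholder names for assembled matrices, and the fill value 2.0 is only an illustration):
.vb
      Mat C;
      MatPtAPSymbolic(A,P,2.0,&C);
      MatPtAPNumeric(A,P,C);
      /* ... A's values change, nonzero pattern unchanged ... */
      MatPtAPNumeric(A,P,C);
      MatDestroy(&C);
.ve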
9199: Level: intermediate
9201: .seealso: MatPtAP(), MatPtAPNumeric(), MatMatMultSymbolic()
9202: @*/
9203: PetscErrorCode MatPtAPSymbolic(Mat A,Mat P,PetscReal fill,Mat *C)
9204: {
9210: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9211: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9212: if (fill <1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9215: MatCheckPreallocated(P,2);
9216: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9217: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9220: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9221: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9222: MatCheckPreallocated(A,1);
9224: if (!A->ops->ptapsymbolic) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatType %s",((PetscObject)A)->type_name);
9225: PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
9226: (*A->ops->ptapsymbolic)(A,P,fill,C);
9227: PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
9229: /* MatSetBlockSize(*C,A->rmap->bs); NO! this is not always true -ma */
9230: return(0);
9231: }
9233: /*@
9234: MatRARt - Creates the matrix product C = R * A * R^T
9236: Neighbor-wise Collective on Mat
9238: Input Parameters:
9239: + A - the matrix
9240: . R - the projection matrix
9241: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9242: - fill - expected fill as ratio of nnz(C)/nnz(A), use PETSC_DEFAULT if you do not have a good estimate
9243: if the result is a dense matrix this is irrelevant
9245: Output Parameters:
9246: . C - the product matrix
9248: Notes:
9249: C will be created and must be destroyed by the user with MatDestroy().
9251: This routine is currently only implemented for pairs of AIJ matrices and classes
9252: which inherit from AIJ. Due to PETSc sparse matrix block row distribution among processes,
9253: parallel MatRARt is implemented via explicit transpose of R, which could be very expensive.
9254: We recommend using MatPtAP().
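   Example of Usage (a minimal sketch; A and R are placeholder names for assembled, dimensionally compatible matrices):
.vb
      Mat C;
      MatRARt(A,R,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
      MatDestroy(&C);
.ve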
9256: Level: intermediate
9258: .seealso: MatRARtSymbolic(), MatRARtNumeric(), MatMatMult(), MatPtAP()
9259: @*/
9260: PetscErrorCode MatRARt(Mat A,Mat R,MatReuse scall,PetscReal fill,Mat *C)
9261: {
9267: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9268: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9269: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9272: MatCheckPreallocated(R,2);
9273: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9274: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9276: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)R),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
9278: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9279: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9280: MatCheckPreallocated(A,1);
9282: if (!A->ops->rart) {
9283: Mat Rt;
9284: MatTranspose(R,MAT_INITIAL_MATRIX,&Rt);
9285: MatMatMatMult(R,A,Rt,scall,fill,C);
9286: MatDestroy(&Rt);
9287: return(0);
9288: }
9289: PetscLogEventBegin(MAT_RARt,A,R,0,0);
9290: (*A->ops->rart)(A,R,scall,fill,C);
9291: PetscLogEventEnd(MAT_RARt,A,R,0,0);
9292: return(0);
9293: }
9295: /*@
9296: MatRARtNumeric - Computes the matrix product C = R * A * R^T
9298: Neighbor-wise Collective on Mat
9300: Input Parameters:
9301: + A - the matrix
9302: - R - the projection matrix
9304: Output Parameters:
9305: . C - the product matrix
9307: Notes:
9308: C must have been created by calling MatRARtSymbolic and must be destroyed by
9309: the user using MatDestroy().
9311: This routine is currently only implemented for pairs of AIJ matrices and classes
9312: which inherit from AIJ. C will be of type MATAIJ.
9314: Level: intermediate
9316: .seealso: MatRARt(), MatRARtSymbolic(), MatMatMultNumeric()
9317: @*/
9318: PetscErrorCode MatRARtNumeric(Mat A,Mat R,Mat C)
9319: {
9325: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9326: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9329: MatCheckPreallocated(R,2);
9330: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9331: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9334: MatCheckPreallocated(C,3);
9335: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9336: if (R->rmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->rmap->N,C->rmap->N);
9337: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
9338: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9339: if (R->rmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->rmap->N,C->cmap->N);
9340: MatCheckPreallocated(A,1);
9342: PetscLogEventBegin(MAT_RARtNumeric,A,R,0,0);
9343: (*A->ops->rartnumeric)(A,R,C);
9344: PetscLogEventEnd(MAT_RARtNumeric,A,R,0,0);
9345: return(0);
9346: }
9348: /*@
9349: MatRARtSymbolic - Creates the (i,j) structure of the matrix product C = R * A * R^T
9351: Neighbor-wise Collective on Mat
9353: Input Parameters:
9354: + A - the matrix
9355: - R - the projection matrix
9357: Output Parameters:
9358: . C - the (i,j) structure of the product matrix
9360: Notes:
9361: C will be created and must be destroyed by the user with MatDestroy().
9363: This routine is currently only implemented for pairs of SeqAIJ matrices and classes
9364: which inherit from SeqAIJ. C will be of type MATSEQAIJ. The product is computed using
9365: this (i,j) structure by calling MatRARtNumeric().
9367: Level: intermediate
9369: .seealso: MatRARt(), MatRARtNumeric(), MatMatMultSymbolic()
9370: @*/
9371: PetscErrorCode MatRARtSymbolic(Mat A,Mat R,PetscReal fill,Mat *C)
9372: {
9378: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9379: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9380: if (fill <1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9383: MatCheckPreallocated(R,2);
9384: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9385: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9388: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
9389: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9390: MatCheckPreallocated(A,1);
9391: PetscLogEventBegin(MAT_RARtSymbolic,A,R,0,0);
9392: (*A->ops->rartsymbolic)(A,R,fill,C);
9393: PetscLogEventEnd(MAT_RARtSymbolic,A,R,0,0);
9395: MatSetBlockSizes(*C,PetscAbs(R->rmap->bs),PetscAbs(R->rmap->bs));
9396: return(0);
9397: }
9399: /*@
9400: MatMatMult - Performs Matrix-Matrix Multiplication C=A*B.
9402: Neighbor-wise Collective on Mat
9404: Input Parameters:
9405: + A - the left matrix
9406: . B - the right matrix
9407: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9408: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate
9409: if the result is a dense matrix this is irrelevant
9411: Output Parameters:
9412: . C - the product matrix
9414: Notes:
9415: Unless scall is MAT_REUSE_MATRIX C will be created.
9417: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call and C was obtained from a previous
9418: call to this function with either MAT_INITIAL_MATRIX or MatMatMultSymbolic()
9420: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9421: actually needed.
9423: If you have many matrices with the same non-zero structure to multiply, you
9424: should either
9425: $ 1) use MAT_REUSE_MATRIX in all calls but the first or
9426: $ 2) call MatMatMultSymbolic() once and then MatMatMultNumeric() for each product needed
9427: In the special case where matrix B (and hence C) is dense you can create the correctly sized matrix C yourself and then call this routine
9428: with MAT_REUSE_MATRIX, rather than first having MatMatMult() create it for you. You can NEVER do this if the matrix C is sparse.
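   Example of Usage (a minimal sketch; A and B are placeholder names for assembled matrices with compatible sizes):
.vb
      Mat C;
      MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
      /* ... the values of A and/or B change, nonzero patterns unchanged ... */
      MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);
      MatDestroy(&C);
.ve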
9430: Level: intermediate
9432: .seealso: MatMatMultSymbolic(), MatMatMultNumeric(), MatTransposeMatMult(), MatMatTransposeMult(), MatPtAP()
9433: @*/
9434: PetscErrorCode MatMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9435: {
9437: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9438: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9439: PetscErrorCode (*mult)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9444: MatCheckPreallocated(A,1);
9445: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9446: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9449: MatCheckPreallocated(B,2);
9450: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9451: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9453: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9454: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9455: if (scall == MAT_REUSE_MATRIX) {
9458: PetscLogEventBegin(MAT_MatMult,A,B,0,0);
9459: PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
9460: (*(*C)->ops->matmultnumeric)(A,B,*C);
9461: PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
9462: PetscLogEventEnd(MAT_MatMult,A,B,0,0);
9463: return(0);
9464: }
9465: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9466: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9468: fA = A->ops->matmult;
9469: fB = B->ops->matmult;
9470: if (fB == fA) {
9471: if (!fB) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatMult not supported for B of type %s",((PetscObject)B)->type_name);
9472: mult = fB;
9473: } else {
9474: /* dispatch based on the type of A and B from their PetscObject's PetscFunctionLists. */
9475: char multname[256];
9476: PetscStrncpy(multname,"MatMatMult_",sizeof(multname));
9477: PetscStrlcat(multname,((PetscObject)A)->type_name,sizeof(multname));
9478: PetscStrlcat(multname,"_",sizeof(multname));
9479: PetscStrlcat(multname,((PetscObject)B)->type_name,sizeof(multname));
9480: PetscStrlcat(multname,"_C",sizeof(multname)); /* e.g., multname = "MatMatMult_seqdense_seqaij_C" */
9481: PetscObjectQueryFunction((PetscObject)B,multname,&mult);
9482: if (!mult) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9483: }
9484: PetscLogEventBegin(MAT_MatMult,A,B,0,0);
9485: (*mult)(A,B,scall,fill,C);
9486: PetscLogEventEnd(MAT_MatMult,A,B,0,0);
9487: return(0);
9488: }
9490: /*@
9491: MatMatMultSymbolic - Performs construction, preallocation, and computes the ij structure
9492: of the matrix-matrix product C=A*B. Call this routine before calling MatMatMultNumeric().
9494: Neighbor-wise Collective on Mat
9496: Input Parameters:
9497: + A - the left matrix
9498: . B - the right matrix
9499: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate,
9500: if C is a dense matrix this is irrelevant
9502: Output Parameters:
9503: . C - the product matrix
9505: Notes:
9506: C will be created and must be destroyed by the user with MatDestroy().
9508: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9509: actually needed.
9511: This routine is currently implemented for
9512: - pairs of AIJ matrices and classes which inherit from AIJ, C will be of type AIJ
9513: - pairs of AIJ (A) and Dense (B) matrix, C will be of type Dense.
9514: - pairs of Dense (A) and AIJ (B) matrix, C will be of type Dense.
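   Example of Usage (a minimal sketch; A and B are placeholder names for assembled matrices with compatible sizes):
.vb
      Mat C;
      MatMatMultSymbolic(A,B,PETSC_DEFAULT,&C);
      MatMatMultNumeric(A,B,C);    /* repeat for each new set of values in A and B */
      MatDestroy(&C);
.ve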
9516: Level: intermediate
9518: Developers Note: There are ways to estimate the number of nonzeros in the resulting product, see for example, http://arxiv.org/abs/1006.4173
9519: We should incorporate them into PETSc.
9521: .seealso: MatMatMult(), MatMatMultNumeric()
9522: @*/
9523: PetscErrorCode MatMatMultSymbolic(Mat A,Mat B,PetscReal fill,Mat *C)
9524: {
9526: PetscErrorCode (*Asymbolic)(Mat,Mat,PetscReal,Mat*);
9527: PetscErrorCode (*Bsymbolic)(Mat,Mat,PetscReal,Mat*);
9528: PetscErrorCode (*symbolic)(Mat,Mat,PetscReal,Mat*)=NULL;
9533: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9534: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9538: MatCheckPreallocated(B,2);
9539: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9540: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9543: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9544: if (fill == PETSC_DEFAULT) fill = 2.0;
9545: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9546: MatCheckPreallocated(A,1);
9548: Asymbolic = A->ops->matmultsymbolic;
9549: Bsymbolic = B->ops->matmultsymbolic;
9550: if (Asymbolic == Bsymbolic) {
9551: if (!Bsymbolic) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"C=A*B not implemented for B of type %s",((PetscObject)B)->type_name);
9552: symbolic = Bsymbolic;
9553: } else { /* dispatch based on the type of A and B */
9554: char symbolicname[256];
9555: PetscStrncpy(symbolicname,"MatMatMultSymbolic_",sizeof(symbolicname));
9556: PetscStrlcat(symbolicname,((PetscObject)A)->type_name,sizeof(symbolicname));
9557: PetscStrlcat(symbolicname,"_",sizeof(symbolicname));
9558: PetscStrlcat(symbolicname,((PetscObject)B)->type_name,sizeof(symbolicname));
9559: PetscStrlcat(symbolicname,"_C",sizeof(symbolicname));
9560: PetscObjectQueryFunction((PetscObject)B,symbolicname,&symbolic);
9561: if (!symbolic) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMultSymbolic requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9562: }
9563: PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
9564: (*symbolic)(A,B,fill,C);
9565: PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
9566: return(0);
9567: }
9569: /*@
9570: MatMatMultNumeric - Performs the numeric matrix-matrix product.
9571: Call this routine after first calling MatMatMultSymbolic().
9573: Neighbor-wise Collective on Mat
9575: Input Parameters:
9576: + A - the left matrix
9577: - B - the right matrix
9579: Output Parameters:
9580: . C - the product matrix, which was created by MatMatMultSymbolic() or a call to MatMatMult().
9582: Notes:
9583: C must have been created with MatMatMultSymbolic().
9585: This routine is currently implemented for
9586: - pairs of AIJ matrices and classes which inherit from AIJ, C will be of type MATAIJ.
9587: - pairs of AIJ (A) and Dense (B) matrix, C will be of type Dense.
9588: - pairs of Dense (A) and AIJ (B) matrix, C will be of type Dense.
9590: Level: intermediate
9592: .seealso: MatMatMult(), MatMatMultSymbolic()
9593: @*/
9594: PetscErrorCode MatMatMultNumeric(Mat A,Mat B,Mat C)
9595: {
9599: MatMatMult(A,B,MAT_REUSE_MATRIX,0.0,&C);
9600: return(0);
9601: }
9603: /*@
9604: MatMatTransposeMult - Performs Matrix-Matrix Multiplication C=A*B^T.
9606: Neighbor-wise Collective on Mat
9608: Input Parameters:
9609: + A - the left matrix
9610: . B - the right matrix
9611: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9612: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known
9614: Output Parameters:
9615: . C - the product matrix
9617: Notes:
9618: C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().
9620: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
9622: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9623: actually needed.
9625: This routine is currently only implemented for pairs of SeqAIJ matrices and for the SeqDense class.
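   Example of Usage (a minimal sketch; A and B are placeholder names for assembled SeqAIJ matrices with the same number of columns):
.vb
      Mat C;
      MatMatTransposeMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);   /* C = A*B^T */
      MatDestroy(&C);
.ve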
9627: Level: intermediate
9629: .seealso: MatMatTransposeMultSymbolic(), MatMatTransposeMultNumeric(), MatMatMult(), MatTransposeMatMult() MatPtAP()
9630: @*/
9631: PetscErrorCode MatMatTransposeMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9632: {
9634: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9635: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9640: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9641: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9642: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9645: MatCheckPreallocated(B,2);
9646: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9647: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9649: if (B->cmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, AN %D != BN %D",A->cmap->N,B->cmap->N);
9650: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9651: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9652: MatCheckPreallocated(A,1);
9654: fA = A->ops->mattransposemult;
9655: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatTransposeMult not supported for A of type %s",((PetscObject)A)->type_name);
9656: fB = B->ops->mattransposemult;
9657: if (!fB) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatTransposeMult not supported for B of type %s",((PetscObject)B)->type_name);
9658: if (fB!=fA) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatTransposeMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9660: PetscLogEventBegin(MAT_MatTransposeMult,A,B,0,0);
9661: if (scall == MAT_INITIAL_MATRIX) {
9662: PetscLogEventBegin(MAT_MatTransposeMultSymbolic,A,B,0,0);
9663: (*A->ops->mattransposemultsymbolic)(A,B,fill,C);
9664: PetscLogEventEnd(MAT_MatTransposeMultSymbolic,A,B,0,0);
9665: }
9666: PetscLogEventBegin(MAT_MatTransposeMultNumeric,A,B,0,0);
9667: (*A->ops->mattransposemultnumeric)(A,B,*C);
9668: PetscLogEventEnd(MAT_MatTransposeMultNumeric,A,B,0,0);
9669: PetscLogEventEnd(MAT_MatTransposeMult,A,B,0,0);
9670: return(0);
9671: }
9673: /*@
9674: MatTransposeMatMult - Performs Matrix-Matrix Multiplication C=A^T*B.
9676: Neighbor-wise Collective on Mat
9678: Input Parameters:
9679: + A - the left matrix
9680: . B - the right matrix
9681: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9682: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known
9684: Output Parameters:
9685: . C - the product matrix
9687: Notes:
9688: C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().
9690: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
9692: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9693: actually needed.
9695: This routine is currently implemented for pairs of AIJ matrices and pairs of SeqDense matrices and classes
9696: which inherit from SeqAIJ. C will be of the same type as the input matrices.
9698: Level: intermediate
9700: .seealso: MatTransposeMatMultSymbolic(), MatTransposeMatMultNumeric(), MatMatMult(), MatMatTransposeMult(), MatPtAP()
9701: @*/
9702: PetscErrorCode MatTransposeMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9703: {
9705: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9706: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9707: PetscErrorCode (*transposematmult)(Mat,Mat,MatReuse,PetscReal,Mat*) = NULL;
9712: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9713: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9714: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9717: MatCheckPreallocated(B,2);
9718: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9719: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9721: if (B->rmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->rmap->N);
9722: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9723: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9724: MatCheckPreallocated(A,1);
9726: fA = A->ops->transposematmult;
9727: fB = B->ops->transposematmult;
9728: if (fB==fA) {
9729: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatTransposeMatMult not supported for A of type %s",((PetscObject)A)->type_name);
9730: transposematmult = fA;
9731: } else {
9732: /* dispatch based on the type of A and B from their PetscObject's PetscFunctionLists. */
9733: char multname[256];
9734: PetscStrncpy(multname,"MatTransposeMatMult_",sizeof(multname));
9735: PetscStrlcat(multname,((PetscObject)A)->type_name,sizeof(multname));
9736: PetscStrlcat(multname,"_",sizeof(multname));
9737: PetscStrlcat(multname,((PetscObject)B)->type_name,sizeof(multname));
9738: PetscStrlcat(multname,"_C",sizeof(multname)); /* e.g., multname = "MatTransposeMatMult_seqdense_seqaij_C" */
9739: PetscObjectQueryFunction((PetscObject)B,multname,&transposematmult);
9740: if (!transposematmult) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatTransposeMatMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9741: }
9742: PetscLogEventBegin(MAT_TransposeMatMult,A,B,0,0);
9743: (*transposematmult)(A,B,scall,fill,C);
9744: PetscLogEventEnd(MAT_TransposeMatMult,A,B,0,0);
9745: return(0);
9746: }
9748: /*@
9749: MatMatMatMult - Performs Matrix-Matrix-Matrix Multiplication D=A*B*C.
9751: Neighbor-wise Collective on Mat
9753: Input Parameters:
9754: + A - the left matrix
9755: . B - the middle matrix
9756: . C - the right matrix
9757: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9758: - fill - expected fill as ratio of nnz(D)/(nnz(A) + nnz(B)+nnz(C)), use PETSC_DEFAULT if you do not have a good estimate
9759: if the result is a dense matrix this is irrelevent
9761: Output Parameters:
9762: . D - the product matrix
9764: Notes:
9765: Unless scall is MAT_REUSE_MATRIX D will be created.
9767: MAT_REUSE_MATRIX can only be used if the matrices A, B and C have the same nonzero pattern as in the previous call
9769: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9770: actually needed.
9772: If you have many matrices with the same non-zero structure to multiply, you
9773: should use MAT_REUSE_MATRIX in all calls but the first.
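   Example of Usage (a minimal sketch; A, B and C are placeholder names for assembled matrices with compatible sizes):
.vb
      Mat D;
      MatMatMatMult(A,B,C,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&D);
      MatDestroy(&D);
.ve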
9775: Level: intermediate
9777: .seealso: MatMatMult, MatPtAP()
9778: @*/
9779: PetscErrorCode MatMatMatMult(Mat A,Mat B,Mat C,MatReuse scall,PetscReal fill,Mat *D)
9780: {
9782: PetscErrorCode (*fA)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9783: PetscErrorCode (*fB)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9784: PetscErrorCode (*fC)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9785: PetscErrorCode (*mult)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9790: MatCheckPreallocated(A,1);
9791: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9792: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9793: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9796: MatCheckPreallocated(B,2);
9797: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9798: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9801: MatCheckPreallocated(C,3);
9802: if (!C->assembled) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9803: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9804: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9805: if (C->rmap->N!=B->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",C->rmap->N,B->cmap->N);
9806: if (scall == MAT_REUSE_MATRIX) {
9809: PetscLogEventBegin(MAT_MatMatMult,A,B,0,0);
9810: (*(*D)->ops->matmatmult)(A,B,C,scall,fill,D);
9811: PetscLogEventEnd(MAT_MatMatMult,A,B,0,0);
9812: return(0);
9813: }
9814: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9815: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9817: fA = A->ops->matmatmult;
9818: fB = B->ops->matmatmult;
9819: fC = C->ops->matmatmult;
9820: if (fA == fB && fA == fC) {
9821: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatMatMult not supported for A of type %s",((PetscObject)A)->type_name);
9822: mult = fA;
9823: } else {
9824: /* dispatch based on the type of A, B and C from their PetscObject's PetscFunctionLists. */
9825: char multname[256];
9826: PetscStrncpy(multname,"MatMatMatMult_",sizeof(multname));
9827: PetscStrlcat(multname,((PetscObject)A)->type_name,sizeof(multname));
9828: PetscStrlcat(multname,"_",sizeof(multname));
9829: PetscStrlcat(multname,((PetscObject)B)->type_name,sizeof(multname));
9830: PetscStrlcat(multname,"_",sizeof(multname));
9831: PetscStrlcat(multname,((PetscObject)C)->type_name,sizeof(multname));
9832: PetscStrlcat(multname,"_C",sizeof(multname));
9833: PetscObjectQueryFunction((PetscObject)B,multname,&mult);
9834: if (!mult) SETERRQ3(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMatMult requires A, %s, to be compatible with B, %s, C, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name,((PetscObject)C)->type_name);
9835: }
9836: PetscLogEventBegin(MAT_MatMatMult,A,B,0,0);
9837: (*mult)(A,B,C,scall,fill,D);
9838: PetscLogEventEnd(MAT_MatMatMult,A,B,0,0);
9839: return(0);
9840: }
9842: /*@
9843: MatCreateRedundantMatrix - Create redundant matrices and put them into processors of subcommunicators.
9845: Collective on Mat
9847: Input Parameters:
9848: + mat - the matrix
9849: . nsubcomm - the number of subcommunicators (= number of redundant parallel or sequential matrices)
9850: . subcomm - MPI communicator split from the communicator in which mat resides (or MPI_COMM_NULL if nsubcomm is used)
9851: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9853: Output Parameter:
9854: . matredundant - redundant matrix
9856: Notes:
9857: MAT_REUSE_MATRIX can only be used when the nonzero structure of the
9858: original matrix has not changed from that last call to MatCreateRedundantMatrix().
9860: This routine creates the duplicated matrices in subcommunicators; you should NOT create them before
9861: calling it.
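   Example of Usage (a minimal sketch creating 2 redundant copies of mat, assuming at least 2 MPI processes; passing MPI_COMM_NULL lets PETSc build the subcommunicators):
.vb
      Mat matred;
      MatCreateRedundantMatrix(mat,2,MPI_COMM_NULL,MAT_INITIAL_MATRIX,&matred);
      /* ... each subcommunicator now owns a full copy of mat ... */
      MatDestroy(&matred);
.ve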
9863: Level: advanced
9865: Concepts: subcommunicator
9866: Concepts: duplicate matrix
9868: .seealso: MatDestroy()
9869: @*/
9870: PetscErrorCode MatCreateRedundantMatrix(Mat mat,PetscInt nsubcomm,MPI_Comm subcomm,MatReuse reuse,Mat *matredundant)
9871: {
9873: MPI_Comm comm;
9874: PetscMPIInt size;
9875: PetscInt mloc_sub,nloc_sub,rstart,rend,M=mat->rmap->N,N=mat->cmap->N,bs=mat->rmap->bs;
9876: Mat_Redundant *redund=NULL;
9877: PetscSubcomm psubcomm=NULL;
9878: MPI_Comm subcomm_in=subcomm;
9879: Mat *matseq;
9880: IS isrow,iscol;
9881: PetscBool newsubcomm=PETSC_FALSE;
9885: if (nsubcomm && reuse == MAT_REUSE_MATRIX) {
9888: }
9890: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
9891: if (size == 1 || nsubcomm == 1) {
9892: if (reuse == MAT_INITIAL_MATRIX) {
9893: MatDuplicate(mat,MAT_COPY_VALUES,matredundant);
9894: } else {
9895: if (*matredundant == mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"MAT_REUSE_MATRIX means reuse the matrix passed in as the final argument, not the original matrix");
9896: MatCopy(mat,*matredundant,SAME_NONZERO_PATTERN);
9897: }
9898: return(0);
9899: }
9901: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9902: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9903: MatCheckPreallocated(mat,1);
9905: PetscLogEventBegin(MAT_RedundantMat,mat,0,0,0);
9906: if (subcomm_in == MPI_COMM_NULL && reuse == MAT_INITIAL_MATRIX) { /* get subcomm if user does not provide subcomm */
9907: /* create psubcomm, then get subcomm */
9908: PetscObjectGetComm((PetscObject)mat,&comm);
9909: MPI_Comm_size(comm,&size);
9910: if (nsubcomm < 1 || nsubcomm > size) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"nsubcomm must be between 1 and %D",size);
9912: PetscSubcommCreate(comm,&psubcomm);
9913: PetscSubcommSetNumber(psubcomm,nsubcomm);
9914: PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_CONTIGUOUS);
9915: PetscSubcommSetFromOptions(psubcomm);
9916: PetscCommDuplicate(PetscSubcommChild(psubcomm),&subcomm,NULL);
9917: newsubcomm = PETSC_TRUE;
9918: PetscSubcommDestroy(&psubcomm);
9919: }
9921: /* get isrow, iscol and a local sequential matrix matseq[0] */
9922: if (reuse == MAT_INITIAL_MATRIX) {
9923: mloc_sub = PETSC_DECIDE;
9924: nloc_sub = PETSC_DECIDE;
9925: if (bs < 1) {
9926: PetscSplitOwnership(subcomm,&mloc_sub,&M);
9927: PetscSplitOwnership(subcomm,&nloc_sub,&N);
9928: } else {
9929: PetscSplitOwnershipBlock(subcomm,bs,&mloc_sub,&M);
9930: PetscSplitOwnershipBlock(subcomm,bs,&nloc_sub,&N);
9931: }
9932: MPI_Scan(&mloc_sub,&rend,1,MPIU_INT,MPI_SUM,subcomm);
9933: rstart = rend - mloc_sub;
9934: ISCreateStride(PETSC_COMM_SELF,mloc_sub,rstart,1,&isrow);
9935: ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol);
9936: } else { /* reuse == MAT_REUSE_MATRIX */
9937: if (*matredundant == mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"MAT_REUSE_MATRIX means reuse the matrix passed in as the final argument, not the original matrix");
9938: /* retrieve subcomm */
9939: PetscObjectGetComm((PetscObject)(*matredundant),&subcomm);
9940: redund = (*matredundant)->redundant;
9941: isrow = redund->isrow;
9942: iscol = redund->iscol;
9943: matseq = redund->matseq;
9944: }
9945: MatCreateSubMatrices(mat,1,&isrow,&iscol,reuse,&matseq);
9947: /* get matredundant over subcomm */
9948: if (reuse == MAT_INITIAL_MATRIX) {
9949: MatCreateMPIMatConcatenateSeqMat(subcomm,matseq[0],nloc_sub,reuse,matredundant);
9951: /* create a supporting struct and attach it to C for reuse */
9952: PetscNewLog(*matredundant,&redund);
9953: (*matredundant)->redundant = redund;
9954: redund->isrow = isrow;
9955: redund->iscol = iscol;
9956: redund->matseq = matseq;
9957: if (newsubcomm) {
9958: redund->subcomm = subcomm;
9959: } else {
9960: redund->subcomm = MPI_COMM_NULL;
9961: }
9962: } else {
9963: MatCreateMPIMatConcatenateSeqMat(subcomm,matseq[0],PETSC_DECIDE,reuse,matredundant);
9964: }
9965: PetscLogEventEnd(MAT_RedundantMat,mat,0,0,0);
9966: return(0);
9967: }
9969: /*@C
9970: MatGetMultiProcBlock - Create multiple [bjacobi] 'parallel submatrices' from
9971: a given 'mat' object. Each submatrix can span multiple procs.
9973: Collective on Mat
9975: Input Parameters:
9976: + mat - the matrix
9977: . subcomm - the subcommunicator obtained by MPI_Comm_split(comm)
9978: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9980: Output Parameter:
9981: . subMat - parallel submatrices, each spanning a given subcomm
9983: Notes:
9984: The submatrix partition across processors is dictated by 'subComm', a
9985: communicator obtained by MPI_Comm_split(comm). The MPI_Comm_split()
9986: is not restricted to grouping consecutive original ranks.
9988: Due to the MPI_Comm_split() usage, the parallel layout of the submatrices
9989: maps directly to the layout of the original matrix [with respect to the local
9990: row,col partitioning]. So the original 'DiagonalMat' naturally maps
9991: into the 'DiagonalMat' of the subMat, hence it is used directly from
9992: the subMat. However, the offDiagMat loses some columns, and these are
9993: reconstructed with MatSetValues().
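   Example of Usage:
   A minimal sketch (error checking omitted), assuming mat is an assembled parallel matrix; the split into two groups by rank parity is purely illustrative.
.vb
      MPI_Comm    subComm;
      Mat         subMat;
      PetscMPIInt rank;
      MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
      MPI_Comm_split(PetscObjectComm((PetscObject)mat),rank%2,rank,&subComm);
      MatGetMultiProcBlock(mat,subComm,MAT_INITIAL_MATRIX,&subMat);
      MatDestroy(&subMat);
      MPI_Comm_free(&subComm);
.ve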
9995: Level: advanced
9997: Concepts: subcommunicator
9998: Concepts: submatrices
10000: .seealso: MatCreateSubMatrices()
10001: @*/
10002: PetscErrorCode MatGetMultiProcBlock(Mat mat, MPI_Comm subComm, MatReuse scall,Mat *subMat)
10003: {
10005: PetscMPIInt commsize,subCommSize;
10008: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&commsize);
10009: MPI_Comm_size(subComm,&subCommSize);
10010: if (subCommSize > commsize) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"CommSize %D < SubCommSize %D",commsize,subCommSize);
10012: if (scall == MAT_REUSE_MATRIX && *subMat == mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"MAT_REUSE_MATRIX means reuse the matrix passed in as the final argument, not the original matrix");
10013: PetscLogEventBegin(MAT_GetMultiProcBlock,mat,0,0,0);
10014: (*mat->ops->getmultiprocblock)(mat,subComm,scall,subMat);
10015: PetscLogEventEnd(MAT_GetMultiProcBlock,mat,0,0,0);
10016: return(0);
10017: }
10019: /*@
10020: MatGetLocalSubMatrix - Gets a reference to a submatrix specified in local numbering
10022: Not Collective
10024: Input Arguments:
10025: + mat - matrix to extract local submatrix from
10026: . isrow - local row indices for submatrix
10027: - iscol - local column indices for submatrix
10029: Output Arguments:
10030: . submat - the submatrix
10032: Level: intermediate
10034: Notes:
10035: The submat should be returned with MatRestoreLocalSubMatrix().
10037: Depending on the format of mat, the returned submat may not implement MatMult(). Its communicator may be
10038: the same as mat, it may be PETSC_COMM_SELF, or some other subcomm of mat's.
10040: The submat always implements MatSetValuesLocal(). If isrow and iscol have the same block size, then
10041: MatSetValuesBlockedLocal() will also be implemented.
10043: The mat must have had an ISLocalToGlobalMapping provided to it with MatSetLocalToGlobalMapping(). Note that
10044: matrices obtained with DMCreateMatrix() generally already have the local to global mapping provided.
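   Example of Usage:
   A minimal sketch (error checking omitted); isrow and iscol are assumed to be index sets of local indices, and row, col and value are illustrative local indices and an entry to add.
.vb
      Mat submat;
      MatGetLocalSubMatrix(mat,isrow,iscol,&submat);
      MatSetValuesLocal(submat,1,&row,1,&col,&value,ADD_VALUES);
      MatRestoreLocalSubMatrix(mat,isrow,iscol,&submat);
.ve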
10046: .seealso: MatRestoreLocalSubMatrix(), MatCreateLocalRef(), MatSetLocalToGlobalMapping()
10047: @*/
10048: PetscErrorCode MatGetLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
10049: {
10058: if (!mat->rmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must have local to global mapping provided before this call");
10060: if (mat->ops->getlocalsubmatrix) {
10061: (*mat->ops->getlocalsubmatrix)(mat,isrow,iscol,submat);
10062: } else {
10063: MatCreateLocalRef(mat,isrow,iscol,submat);
10064: }
10065: return(0);
10066: }
10068: /*@
10069: MatRestoreLocalSubMatrix - Restores a reference to a submatrix specified in local numbering
10071: Not Collective
10073: Input Arguments:
10074: + mat - matrix to extract local submatrix from
10075: . isrow - local row indices for submatrix
10076: . iscol - local column indices for submatrix
10077: - submat - the submatrix
10079: Level: intermediate
10081: .seealso: MatGetLocalSubMatrix()
10082: @*/
10083: PetscErrorCode MatRestoreLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
10084: {
10093: if (*submat) {
10095: }
10097: if (mat->ops->restorelocalsubmatrix) {
10098: (*mat->ops->restorelocalsubmatrix)(mat,isrow,iscol,submat);
10099: } else {
10100: MatDestroy(submat);
10101: }
10102: *submat = NULL;
10103: return(0);
10104: }
10106: /* --------------------------------------------------------*/
10107: /*@
10108: MatFindZeroDiagonals - Finds all the rows of a matrix whose diagonal entry is zero or not stored
10110: Collective on Mat
10112: Input Parameter:
10113: . mat - the matrix
10115: Output Parameter:
10116: . is - the list of rows with zero or missing diagonal entries (possibly empty)
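   Example of Usage:
   A minimal sketch (error checking omitted) that prints the rows of an assembled matrix mat with zero or missing diagonal entries.
.vb
      IS zerodiag;
      MatFindZeroDiagonals(mat,&zerodiag);
      ISView(zerodiag,PETSC_VIEWER_STDOUT_WORLD);
      ISDestroy(&zerodiag);
.ve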
10118: Level: developer
10120: Concepts: matrix-vector product
10122: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
10123: @*/
10124: PetscErrorCode MatFindZeroDiagonals(Mat mat,IS *is)
10125: {
10131: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10132: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10134: if (!mat->ops->findzerodiagonals) {
10135: Vec diag;
10136: const PetscScalar *a;
10137: PetscInt *rows;
10138: PetscInt rStart, rEnd, r, nrow = 0;
10140: MatCreateVecs(mat, &diag, NULL);
10141: MatGetDiagonal(mat, diag);
10142: MatGetOwnershipRange(mat, &rStart, &rEnd);
10143: VecGetArrayRead(diag, &a);
10144: for (r = 0; r < rEnd-rStart; ++r) if (a[r] == 0.0) ++nrow;
10145: PetscMalloc1(nrow, &rows);
10146: nrow = 0;
10147: for (r = 0; r < rEnd-rStart; ++r) if (a[r] == 0.0) rows[nrow++] = r+rStart;
10148: VecRestoreArrayRead(diag, &a);
10149: VecDestroy(&diag);
10150: ISCreateGeneral(PetscObjectComm((PetscObject) mat), nrow, rows, PETSC_OWN_POINTER, is);
10151: } else {
10152: (*mat->ops->findzerodiagonals)(mat, is);
10153: }
10154: return(0);
10155: }
10157: /*@
10158: MatFindOffBlockDiagonalEntries - Finds all the rows of a matrix that have entries outside of the main diagonal block (defined by the matrix block size)
10160: Collective on Mat
10162: Input Parameter:
10163: . mat - the matrix
10165: Output Parameter:
10166: . is - contains the list of rows with off block diagonal entries
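   Example of Usage:
   A minimal sketch (error checking omitted), assuming mat is assembled and has a block size set.
.vb
      IS offblock;
      MatFindOffBlockDiagonalEntries(mat,&offblock);
      ISView(offblock,PETSC_VIEWER_STDOUT_WORLD);
      ISDestroy(&offblock);
.ve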
10168: Level: developer
10170: Concepts: matrix-vector product
10172: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
10173: @*/
10174: PetscErrorCode MatFindOffBlockDiagonalEntries(Mat mat,IS *is)
10175: {
10181: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10182: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10184: if (!mat->ops->findoffblockdiagonalentries) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a find off block diagonal entries defined");
10185: (*mat->ops->findoffblockdiagonalentries)(mat,is);
10186: return(0);
10187: }
10189: /*@C
10190: MatInvertBlockDiagonal - Inverts the block diagonal entries.
10192: Collective on Mat
10194: Input Parameters:
10195: . mat - the matrix
10197: Output Parameters:
10198: . values - the block inverses in column major order (FORTRAN-like)
10200: Note:
10201: This routine is not available from Fortran.
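   Example of Usage:
   A minimal sketch (error checking omitted); values points to the inverses of the diagonal blocks, each stored as a dense bs-by-bs block in column major order.
.vb
      const PetscScalar *values;
      PetscInt          bs;
      MatGetBlockSize(mat,&bs);
      MatInvertBlockDiagonal(mat,&values);
      /* values[0..bs*bs-1] is the inverse of the first diagonal block */
.ve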
10203: Level: advanced
10205: .seealso: MatInvertBlockDiagonalMat()
10206: @*/
10207: PetscErrorCode MatInvertBlockDiagonal(Mat mat,const PetscScalar **values)
10208: {
10213: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10214: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10215: if (!mat->ops->invertblockdiagonal) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported");
10216: (*mat->ops->invertblockdiagonal)(mat,values);
10217: return(0);
10218: }
10220: /*@
10221: MatInvertBlockDiagonalMat - sets matrix C to the inverted block diagonal of matrix A
10223: Collective on Mat
10225: Input Parameters:
10226: . A - the matrix
10228: Output Parameters:
10229: . C - matrix with the inverted block diagonal of A. This matrix should already have been created (e.g. with MatCreate()) and may have had its type set.
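   Example of Usage:
   A minimal sketch (error checking omitted); C is created empty here and filled by the routine.
.vb
      Mat C;
      MatCreate(PetscObjectComm((PetscObject)A),&C);
      MatInvertBlockDiagonalMat(A,C);
      MatDestroy(&C);
.ve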
10231: Level: advanced
10233: .seealso: MatInvertBlockDiagonal()
10234: @*/
10235: PetscErrorCode MatInvertBlockDiagonalMat(Mat A,Mat C)
10236: {
10237: PetscErrorCode ierr;
10238: const PetscScalar *vals;
10239: PetscInt *dnnz;
10240: PetscInt M,N,m,n,rstart,rend,bs,i,j;
10243: MatInvertBlockDiagonal(A,&vals);
10244: MatGetBlockSize(A,&bs);
10245: MatGetSize(A,&M,&N);
10246: MatGetLocalSize(A,&m,&n);
10247: MatSetSizes(C,m,n,M,N);
10248: MatSetBlockSize(C,bs);
10249: PetscMalloc1(m/bs,&dnnz);
10250: for (j = 0; j < m/bs; j++) {
10251: dnnz[j] = 1;
10252: }
10253: MatXAIJSetPreallocation(C,bs,dnnz,NULL,NULL,NULL);
10254: PetscFree(dnnz);
10255: MatGetOwnershipRange(C,&rstart,&rend);
10256: MatSetOption(C,MAT_ROW_ORIENTED,PETSC_FALSE);
10257: for (i = rstart/bs; i < rend/bs; i++) {
10258: MatSetValuesBlocked(C,1,&i,1,&i,&vals[(i-rstart/bs)*bs*bs],INSERT_VALUES);
10259: }
10260: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
10261: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
10262: MatSetOption(C,MAT_ROW_ORIENTED,PETSC_TRUE);
10263: return(0);
10264: }
10266: /*@C
10267: MatTransposeColoringDestroy - Destroys a coloring context for matrix product C=A*B^T that was created
10268: via MatTransposeColoringCreate().
10270: Collective on MatTransposeColoring
10272: Input Parameter:
10273: . c - coloring context
10275: Level: intermediate
10277: .seealso: MatTransposeColoringCreate()
10278: @*/
10279: PetscErrorCode MatTransposeColoringDestroy(MatTransposeColoring *c)
10280: {
10281: PetscErrorCode ierr;
10282: MatTransposeColoring matcolor=*c;
10285: if (!matcolor) return(0);
10286: if (--((PetscObject)matcolor)->refct > 0) {matcolor = 0; return(0);}
10288: PetscFree3(matcolor->ncolumns,matcolor->nrows,matcolor->colorforrow);
10289: PetscFree(matcolor->rows);
10290: PetscFree(matcolor->den2sp);
10291: PetscFree(matcolor->colorforcol);
10292: PetscFree(matcolor->columns);
10293: if (matcolor->brows>0) {
10294: PetscFree(matcolor->lstart);
10295: }
10296: PetscHeaderDestroy(c);
10297: return(0);
10298: }
10300: /*@C
10301: MatTransColoringApplySpToDen - Given a symbolic matrix product C=A*B^T for which
10302: a MatTransposeColoring context has been created, computes a dense B^T by applying
10303: the MatTransposeColoring to the sparse B.
10305: Collective on MatTransposeColoring
10307: Input Parameters:
10308: + B - sparse matrix B
10309: . Btdense - symbolic dense matrix B^T
10310: - coloring - coloring context created with MatTransposeColoringCreate()
10312: Output Parameter:
10313: . Btdense - dense matrix B^T
10315: Level: advanced
10317: Notes: These are used internally for some implementations of MatRARt()
10319: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy(), MatTransColoringApplyDenToSp()
10321: .keywords: coloring
10322: @*/
10323: PetscErrorCode MatTransColoringApplySpToDen(MatTransposeColoring coloring,Mat B,Mat Btdense)
10324: {
10332: if (!B->ops->transcoloringapplysptoden) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this matrix type %s",((PetscObject)B)->type_name);
10333: (B->ops->transcoloringapplysptoden)(coloring,B,Btdense);
10334: return(0);
10335: }
10337: /*@C
10338: MatTransColoringApplyDenToSp - Given a symbolic matrix product Csp=A*B^T for which
10339: a MatTransposeColoring context has been created, and a dense matrix Cden=A*Btdense
10340: in which Btdense was obtained from MatTransColoringApplySpToDen(), recovers the sparse matrix
10341: Csp from Cden.
10343: Collective on MatTransposeColoring
10345: Input Parameters:
10346: + coloring - coloring context created with MatTransposeColoringCreate()
10347: - Cden - matrix product of a sparse matrix and a dense matrix Btdense
10349: Output Parameter:
10350: . Csp - sparse matrix
10352: Level: advanced
10354: Notes: These are used internally for some implementations of MatRARt()
10356: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy(), MatTransColoringApplySpToDen()
10358: .keywords: coloring
10359: @*/
10360: PetscErrorCode MatTransColoringApplyDenToSp(MatTransposeColoring matcoloring,Mat Cden,Mat Csp)
10361: {
10369: if (!Csp->ops->transcoloringapplydentosp) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this matrix type %s",((PetscObject)Csp)->type_name);
10370: (Csp->ops->transcoloringapplydentosp)(matcoloring,Cden,Csp);
10371: return(0);
10372: }
10374: /*@C
10375: MatTransposeColoringCreate - Creates a matrix coloring context for matrix product C=A*B^T.
10377: Collective on Mat
10379: Input Parameters:
10380: + mat - the matrix product C
10381: - iscoloring - the coloring of the matrix; usually obtained with MatColoringCreate() or DMCreateColoring()
10383: Output Parameter:
10384: . color - the new coloring context
10386: Level: intermediate
10388: .seealso: MatTransposeColoringDestroy(), MatTransColoringApplySpToDen(),
10389: MatTransColoringApplyDenToSp()
10390: @*/
10391: PetscErrorCode MatTransposeColoringCreate(Mat mat,ISColoring iscoloring,MatTransposeColoring *color)
10392: {
10393: MatTransposeColoring c;
10394: MPI_Comm comm;
10395: PetscErrorCode ierr;
10398: PetscLogEventBegin(MAT_TransposeColoringCreate,mat,0,0,0);
10399: PetscObjectGetComm((PetscObject)mat,&comm);
10400: PetscHeaderCreate(c,MAT_TRANSPOSECOLORING_CLASSID,"MatTransposeColoring","Matrix product C=A*B^T via coloring","Mat",comm,MatTransposeColoringDestroy,NULL);
10402: c->ctype = iscoloring->ctype;
10403: if (mat->ops->transposecoloringcreate) {
10404: (*mat->ops->transposecoloringcreate)(mat,iscoloring,c);
10405: } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Code not yet written for this matrix type");
10407: *color = c;
10408: PetscLogEventEnd(MAT_TransposeColoringCreate,mat,0,0,0);
10409: return(0);
10410: }
10412: /*@
10413: MatGetNonzeroState - Returns a 64 bit integer representing the current state of nonzeros in the matrix. If the
10414: matrix has had no new nonzero locations added since the previous call then the value will be the
10415: same, otherwise it will be larger.
10417: Not Collective
10419: Input Parameter:
10420: . mat - the matrix
10422: Output Parameter:
10423: . state - the current state
10425: Notes: You can only compare states from two different calls on the SAME matrix; you cannot compare calls made on
10426: different matrices.
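   Example of Usage:
   A minimal sketch (error checking omitted) that checks whether new nonzero locations were introduced into the same matrix mat between two assemblies.
.vb
      PetscObjectState state1,state2;
      MatGetNonzeroState(mat,&state1);
      /* set additional values in mat, then MatAssemblyBegin()/MatAssemblyEnd() */
      MatGetNonzeroState(mat,&state2);
      if (state2 > state1) { /* the nonzero structure changed */ }
.ve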
10428: Level: intermediate
10430: @*/
10431: PetscErrorCode MatGetNonzeroState(Mat mat,PetscObjectState *state)
10432: {
10435: *state = mat->nonzerostate;
10436: return(0);
10437: }
10439: /*@
10440: MatCreateMPIMatConcatenateSeqMat - Creates a single large PETSc matrix by concatenating sequential
10441: matrices from each processor
10443: Collective on MPI_Comm
10445: Input Parameters:
10446: + comm - the communicator the parallel matrix will live on
10447: . seqmat - the input sequential matrix (one per process)
10448: . n - number of local columns (or PETSC_DECIDE)
10449: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
10451: Output Parameter:
10452: . mpimat - the parallel matrix generated
10454: Level: advanced
10456: Notes: The number of columns of the matrix in EACH processor MUST be the same.
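   Example of Usage:
   A minimal sketch (error checking omitted), assuming each rank owns an assembled sequential matrix seqmat with the same number of columns.
.vb
      Mat mpimat;
      MatCreateMPIMatConcatenateSeqMat(PETSC_COMM_WORLD,seqmat,PETSC_DECIDE,MAT_INITIAL_MATRIX,&mpimat);
      MatDestroy(&mpimat);
.ve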
10458: @*/
10459: PetscErrorCode MatCreateMPIMatConcatenateSeqMat(MPI_Comm comm,Mat seqmat,PetscInt n,MatReuse reuse,Mat *mpimat)
10460: {
10464: if (!seqmat->ops->creatempimatconcatenateseqmat) SETERRQ1(PetscObjectComm((PetscObject)seqmat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)seqmat)->type_name);
10465: if (reuse == MAT_REUSE_MATRIX && seqmat == *mpimat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"MAT_REUSE_MATRIX means reuse the matrix passed in as the final argument, not the original matrix");
10467: PetscLogEventBegin(MAT_Merge,seqmat,0,0,0);
10468: (*seqmat->ops->creatempimatconcatenateseqmat)(comm,seqmat,n,reuse,mpimat);
10469: PetscLogEventEnd(MAT_Merge,seqmat,0,0,0);
10470: return(0);
10471: }
10473: /*@
10474: MatSubdomainsCreateCoalesce - Creates index subdomains by coalescing adjacent
10475: ranks' ownership ranges.
10477: Collective on A
10479: Input Parameters:
10480: + A - the matrix to create subdomains from
10481: - N - requested number of subdomains
10484: Output Parameters:
10485: + n - number of subdomains resulting on this rank
10486: - iss - IS list with indices of subdomains on this rank
10488: Level: advanced
10490: Notes: The number of subdomains must be positive and smaller than the communicator size.
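   Example of Usage:
   A minimal sketch (error checking omitted) that coalesces the ownership ranges of A into 2 subdomains; the resulting index sets might then be passed to, e.g., PCGASMSetSubdomains().
.vb
      PetscInt n,i;
      IS       *iss;
      MatSubdomainsCreateCoalesce(A,2,&n,&iss);
      /* use the n index sets, then clean up */
      for (i=0; i<n; i++) {ISDestroy(&iss[i]);}
      PetscFree(iss);
.ve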
10491: @*/
10492: PetscErrorCode MatSubdomainsCreateCoalesce(Mat A,PetscInt N,PetscInt *n,IS *iss[])
10493: {
10494: MPI_Comm comm,subcomm;
10495: PetscMPIInt size,rank,color;
10496: PetscInt rstart,rend,k;
10497: PetscErrorCode ierr;
10500: PetscObjectGetComm((PetscObject)A,&comm);
10501: MPI_Comm_size(comm,&size);
10502: MPI_Comm_rank(comm,&rank);
10503: if (N < 1 || N >= (PetscInt)size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subdomains must be > 0 and < %D, got N = %D",size,N);
10504: *n = 1;
10505: k = ((PetscInt)size)/N + ((PetscInt)size%N>0); /* There are up to k ranks to a color */
10506: color = rank/k;
10507: MPI_Comm_split(comm,color,rank,&subcomm);
10508: PetscMalloc1(1,iss);
10509: MatGetOwnershipRange(A,&rstart,&rend);
10510: ISCreateStride(subcomm,rend-rstart,rstart,1,iss[0]);
10511: MPI_Comm_free(&subcomm);
10512: return(0);
10513: }
10515: /*@
10516: MatGalerkin - Constructs the coarse grid problem via Galerkin projection.
10518: If the interpolation and restriction operators are the same, uses MatPtAP();
10519: otherwise uses MatMatMatMult().
10521: Once the coarse grid problem is constructed, corrects for interpolation operators
10522: that are not of full rank, which can legitimately happen in the case of non-nested
10523: geometric multigrid.
10525: Input Parameters:
10526: + restrct - restriction operator
10527: . dA - fine grid matrix
10528: . interpolate - interpolation operator
10529: . reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
10530: - fill - expected fill, use PETSC_DEFAULT if you do not have a good estimate
10532: Output Parameters:
10533: . A - the Galerkin coarse matrix
10535: Options Database Key:
10536: . -pc_mg_galerkin <both,pmat,mat,none>
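   Example of Usage:
   A minimal sketch (error checking omitted), assuming restrct, dA and interpolate have already been created and assembled.
.vb
      Mat Acoarse;
      MatGalerkin(restrct,dA,interpolate,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&Acoarse);
      MatDestroy(&Acoarse);
.ve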
10538: Level: developer
10540: .keywords: MG, multigrid, Galerkin
10542: .seealso: MatPtAP(), MatMatMatMult()
10543: @*/
10544: PetscErrorCode MatGalerkin(Mat restrct, Mat dA, Mat interpolate, MatReuse reuse, PetscReal fill, Mat *A)
10545: {
10547: IS zerorows;
10548: Vec diag;
10551: if (reuse == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
10552: /* Construct the coarse grid matrix */
10553: if (interpolate == restrct) {
10554: MatPtAP(dA,interpolate,reuse,fill,A);
10555: } else {
10556: MatMatMatMult(restrct,dA,interpolate,reuse,fill,A);
10557: }
10559: /* If the interpolation matrix is not of full rank, A will have zero rows.
10560: This can legitimately happen in the case of non-nested geometric multigrid.
10561: In that event, we set the rows of the matrix to the rows of the identity,
10562: ignoring the equations (as the RHS will also be zero). */
10564: MatFindZeroRows(*A, &zerorows);
10566: if (zerorows != NULL) { /* if there are any zero rows */
10567: MatCreateVecs(*A, &diag, NULL);
10568: MatGetDiagonal(*A, diag);
10569: VecISSet(diag, zerorows, 1.0);
10570: MatDiagonalSet(*A, diag, INSERT_VALUES);
10571: VecDestroy(&diag);
10572: ISDestroy(&zerorows);
10573: }
10574: return(0);
10575: }
10577: /*@C
10578: MatSetOperation - Allows user to set a matrix operation for any matrix type
10580: Logically Collective on Mat
10582: Input Parameters:
10583: + mat - the matrix
10584: . op - the name of the operation
10585: - f - the function that provides the operation
10587: Level: developer
10589: Usage:
10590: $ extern PetscErrorCode usermult(Mat,Vec,Vec);
10591: $ MatCreateXXX(comm,...&A);
10592: $ MatSetOperation(A,MATOP_MULT,(void(*)(void))usermult);
10594: Notes:
10595: See the file include/petscmat.h for a complete list of matrix
10596: operations, which all have the form MATOP_<OPERATION>, where
10597: <OPERATION> is the name (in all capital letters) of the
10598: user interface routine (e.g., MatMult() -> MATOP_MULT).
10600: All user-provided functions (except for MATOP_DESTROY) should have the same calling
10601: sequence as the usual matrix interface routines, since they
10602: are intended to be accessed via the usual matrix interface
10603: routines, e.g.,
10604: $ MatMult(Mat,Vec,Vec) -> usermult(Mat,Vec,Vec)
10606: In particular each function MUST return an error code of 0 on success and
10607: nonzero on failure.
10609: This routine is distinct from MatShellSetOperation() in that it can be called on any matrix type.
10611: .keywords: matrix, set, operation
10613: .seealso: MatGetOperation(), MatCreateShell(), MatShellSetContext(), MatShellSetOperation()
10614: @*/
10615: PetscErrorCode MatSetOperation(Mat mat,MatOperation op,void (*f)(void))
10616: {
10619: (((void(**)(void))mat->ops)[op]) = f;
10620: return(0);
10621: }
10623: /*@C
10624: MatGetOperation - Gets a matrix operation for any matrix type.
10626: Not Collective
10628: Input Parameters:
10629: + mat - the matrix
10630: - op - the name of the operation
10632: Output Parameter:
10633: . f - the function that provides the operation
10635: Level: developer
10637: Usage:
10638: $ PetscErrorCode (*usermult)(Mat,Vec,Vec);
10639: $ MatGetOperation(A,MATOP_MULT,(void(**)(void))&usermult);
10641: Notes:
10642: See the file include/petscmat.h for a complete list of matrix
10643: operations, which all have the form MATOP_<OPERATION>, where
10644: <OPERATION> is the name (in all capital letters) of the
10645: user interface routine (e.g., MatMult() -> MATOP_MULT).
10647: This routine is distinct from MatShellGetOperation() in that it can be called on any matrix type.
10649: .keywords: matrix, get, operation
10651: .seealso: MatSetOperation(), MatCreateShell(), MatShellGetContext(), MatShellGetOperation()
10652: @*/
10653: PetscErrorCode MatGetOperation(Mat mat,MatOperation op,void(**f)(void))
10654: {
10657: *f = (((void (**)(void))mat->ops)[op]);
10658: return(0);
10659: }
10661: /*@
10662: MatHasOperation - Determines whether the given matrix supports the particular
10663: operation.
10665: Not Collective
10667: Input Parameters:
10668: + mat - the matrix
10669: - op - the operation, for example, MATOP_GET_DIAGONAL
10671: Output Parameter:
10672: . has - either PETSC_TRUE or PETSC_FALSE
10674: Level: advanced
10676: Notes:
10677: See the file include/petscmat.h for a complete list of matrix
10678: operations, which all have the form MATOP_<OPERATION>, where
10679: <OPERATION> is the name (in all capital letters) of the
10680: user-level routine. E.g., MatNorm() -> MATOP_NORM.
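   Example of Usage:
   A minimal sketch (error checking omitted); diag is assumed to be a vector created with MatCreateVecs().
.vb
      PetscBool has;
      MatHasOperation(mat,MATOP_GET_DIAGONAL,&has);
      if (has) {MatGetDiagonal(mat,diag);}
.ve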
10682: .keywords: matrix, has, operation
10684: .seealso: MatCreateShell()
10685: @*/
10686: PetscErrorCode MatHasOperation(Mat mat,MatOperation op,PetscBool *has)
10687: {
10694: if (mat->ops->hasoperation) {
10695: (*mat->ops->hasoperation)(mat,op,has);
10696: } else {
10697: if (((void**)mat->ops)[op]) *has = PETSC_TRUE;
10698: else {
10699: *has = PETSC_FALSE;
10700: if (op == MATOP_CREATE_SUBMATRIX) {
10701: PetscMPIInt size;
10703: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
10704: if (size == 1) {
10705: MatHasOperation(mat,MATOP_CREATE_SUBMATRICES,has);
10706: }
10707: }
10708: }
10709: }
10710: return(0);
10711: }