Actual source code: mpidense.c
petsc-3.14.3 2021-01-09
2: /*
3: Basic functions for parallel dense matrices.
4: */
6: #include <../src/mat/impls/dense/mpi/mpidense.h>
7: #include <../src/mat/impls/aij/mpi/mpiaij.h>
8: #include <petscblaslapack.h>
10: /*@
12: MatDenseGetLocalMatrix - For a MATMPIDENSE or MATSEQDENSE matrix, returns the sequential
13: matrix that represents the operator. For a sequential matrix, the matrix itself is returned.
15: Input Parameter:
16: . A - the Seq or MPI dense matrix
18: Output Parameter:
19: . B - the inner matrix
21: Level: intermediate
23: @*/
24: PetscErrorCode MatDenseGetLocalMatrix(Mat A,Mat *B)
25: {
26: Mat_MPIDense *mat = (Mat_MPIDense*)A->data;
28: PetscBool flg;
33: PetscObjectBaseTypeCompare((PetscObject)A,MATMPIDENSE,&flg);
34: if (flg) *B = mat->A;
35: else {
36: PetscObjectBaseTypeCompare((PetscObject)A,MATSEQDENSE,&flg);
37: if (!flg) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not for matrix type %s",((PetscObject)A)->type_name);
38: *B = A;
39: }
40: return(0);
41: }
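/* Usage sketch (editor's annotation, not part of the original source): obtain the local
   sequential block of a dense matrix and inspect its storage; the helper name
   ExampleInspectLocalBlock is illustrative only. */
static PetscErrorCode ExampleInspectLocalBlock(Mat A)
{
  Mat      Aloc;
  PetscInt m,n,lda;

  MatDenseGetLocalMatrix(A,&Aloc);   /* Aloc aliases A's local data; no copy is made */
  MatGetLocalSize(Aloc,&m,&n);       /* local rows and, for MATMPIDENSE, global columns */
  MatDenseGetLDA(Aloc,&lda);         /* leading dimension of the column-major storage */
  PetscInfo3(A,"local block is %D x %D with lda %D\n",m,n,lda);
  return(0);
}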
43: PetscErrorCode MatGetRow_MPIDense(Mat A,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
44: {
45: Mat_MPIDense *mat = (Mat_MPIDense*)A->data;
47: PetscInt lrow,rstart = A->rmap->rstart,rend = A->rmap->rend;
50: if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"only local rows");
51: lrow = row - rstart;
52: MatGetRow(mat->A,lrow,nz,(const PetscInt**)idx,(const PetscScalar**)v);
53: return(0);
54: }
56: PetscErrorCode MatRestoreRow_MPIDense(Mat A,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
57: {
58: Mat_MPIDense *mat = (Mat_MPIDense*)A->data;
60: PetscInt lrow,rstart = A->rmap->rstart,rend = A->rmap->rend;
63: if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"only local rows");
64: lrow = row - rstart;
65: MatRestoreRow(mat->A,lrow,nz,(const PetscInt**)idx,(const PetscScalar**)v);
66: return(0);
67: }
69: PetscErrorCode MatGetDiagonalBlock_MPIDense(Mat A,Mat *a)
70: {
71: Mat_MPIDense *mdn = (Mat_MPIDense*)A->data;
73: PetscInt m = A->rmap->n,rstart = A->rmap->rstart;
74: PetscScalar *array;
75: MPI_Comm comm;
76: PetscBool flg;
77: Mat B;
80: MatHasCongruentLayouts(A,&flg);
81: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only square matrices with congruent row and column layouts are supported.");
82: PetscObjectQuery((PetscObject)A,"DiagonalBlock",(PetscObject*)&B);
83: if (!B) { /* This should use MatDenseGetSubMatrix (not create), but we would need a call like MatRestoreDiagonalBlock */
85: PetscObjectTypeCompare((PetscObject)mdn->A,MATSEQDENSECUDA,&flg);
86: if (flg) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not coded for %s. Send an email to petsc-dev@mcs.anl.gov to request this feature",MATSEQDENSECUDA);
87: PetscObjectGetComm((PetscObject)(mdn->A),&comm);
88: MatCreate(comm,&B);
89: MatSetSizes(B,m,m,m,m);
90: MatSetType(B,((PetscObject)mdn->A)->type_name);
91: MatDenseGetArrayRead(mdn->A,(const PetscScalar**)&array);
92: MatSeqDenseSetPreallocation(B,array+m*rstart);
93: MatDenseRestoreArrayRead(mdn->A,(const PetscScalar**)&array);
94: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
95: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
96: PetscObjectCompose((PetscObject)A,"DiagonalBlock",(PetscObject)B);
97: *a = B;
98: MatDestroy(&B);
99: } else *a = B;
100: return(0);
101: }
103: PetscErrorCode MatSetValues_MPIDense(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
104: {
105: Mat_MPIDense *A = (Mat_MPIDense*)mat->data;
107: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend,row;
108: PetscBool roworiented = A->roworiented;
111: for (i=0; i<m; i++) {
112: if (idxm[i] < 0) continue;
113: if (idxm[i] >= mat->rmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
114: if (idxm[i] >= rstart && idxm[i] < rend) {
115: row = idxm[i] - rstart;
116: if (roworiented) {
117: MatSetValues(A->A,1,&row,n,idxn,v+i*n,addv);
118: } else {
119: for (j=0; j<n; j++) {
120: if (idxn[j] < 0) continue;
121: if (idxn[j] >= mat->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
122: MatSetValues(A->A,1,&row,1,&idxn[j],v+i+j*m,addv);
123: }
124: }
125: } else if (!A->donotstash) {
126: mat->assembled = PETSC_FALSE;
127: if (roworiented) {
128: MatStashValuesRow_Private(&mat->stash,idxm[i],n,idxn,v+i*n,PETSC_FALSE);
129: } else {
130: MatStashValuesCol_Private(&mat->stash,idxm[i],n,idxn,v+i,m,PETSC_FALSE);
131: }
132: }
133: }
134: return(0);
135: }
137: PetscErrorCode MatGetValues_MPIDense(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
138: {
139: Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
141: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend,row;
144: for (i=0; i<m; i++) {
145: if (idxm[i] < 0) continue; /* SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row"); */
146: if (idxm[i] >= mat->rmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
147: if (idxm[i] >= rstart && idxm[i] < rend) {
148: row = idxm[i] - rstart;
149: for (j=0; j<n; j++) {
150: if (idxn[j] < 0) continue; /* SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column"); */
151: if (idxn[j] >= mat->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
152: MatGetValues(mdn->A,1,&row,1,&idxn[j],v+i*n+j);
153: }
154: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
155: }
156: return(0);
157: }
159: static PetscErrorCode MatDenseGetLDA_MPIDense(Mat A,PetscInt *lda)
160: {
161: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
165: MatDenseGetLDA(a->A,lda);
166: return(0);
167: }
169: static PetscErrorCode MatDenseSetLDA_MPIDense(Mat A,PetscInt lda)
170: {
171: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
172: PetscBool iscuda;
176: if (!a->A) {
177: if (a->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
178: PetscLayoutSetUp(A->rmap);
179: PetscLayoutSetUp(A->cmap);
180: MatCreate(PETSC_COMM_SELF,&a->A);
181: PetscLogObjectParent((PetscObject)A,(PetscObject)a->A);
182: MatSetSizes(a->A,A->rmap->n,A->cmap->N,A->rmap->n,A->cmap->N);
183: PetscObjectTypeCompare((PetscObject)A,MATMPIDENSECUDA,&iscuda);
184: MatSetType(a->A,iscuda ? MATSEQDENSECUDA : MATSEQDENSE);
185: }
186: MatDenseSetLDA(a->A,lda);
187: return(0);
188: }
190: static PetscErrorCode MatDenseGetArray_MPIDense(Mat A,PetscScalar **array)
191: {
192: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
196: if (a->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
197: MatDenseGetArray(a->A,array);
198: return(0);
199: }
201: static PetscErrorCode MatDenseGetArrayRead_MPIDense(Mat A,const PetscScalar **array)
202: {
203: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
207: if (a->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
208: MatDenseGetArrayRead(a->A,array);
209: return(0);
210: }
212: static PetscErrorCode MatDenseGetArrayWrite_MPIDense(Mat A,PetscScalar **array)
213: {
214: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
218: if (a->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
219: MatDenseGetArrayWrite(a->A,array);
220: return(0);
221: }
223: static PetscErrorCode MatDensePlaceArray_MPIDense(Mat A,const PetscScalar *array)
224: {
225: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
229: if (a->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
230: if (a->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
231: MatDensePlaceArray(a->A,array);
232: return(0);
233: }
235: static PetscErrorCode MatDenseResetArray_MPIDense(Mat A)
236: {
237: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
241: if (a->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
242: if (a->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
243: MatDenseResetArray(a->A);
244: return(0);
245: }
247: static PetscErrorCode MatDenseReplaceArray_MPIDense(Mat A,const PetscScalar *array)
248: {
249: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
253: if (a->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
254: if (a->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
255: MatDenseReplaceArray(a->A,array);
256: return(0);
257: }
259: static PetscErrorCode MatCreateSubMatrix_MPIDense(Mat A,IS isrow,IS iscol,MatReuse scall,Mat *B)
260: {
261: Mat_MPIDense *mat = (Mat_MPIDense*)A->data,*newmatd;
262: PetscErrorCode ierr;
263: PetscInt lda,i,j,rstart,rend,nrows,ncols,Ncols,nlrows,nlcols;
264: const PetscInt *irow,*icol;
265: const PetscScalar *v;
266: PetscScalar *bv;
267: Mat newmat;
268: IS iscol_local;
269: MPI_Comm comm_is,comm_mat;
272: PetscObjectGetComm((PetscObject)A,&comm_mat);
273: PetscObjectGetComm((PetscObject)iscol,&comm_is);
274: if (comm_mat != comm_is) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NOTSAMECOMM,"IS communicator must match matrix communicator");
276: ISAllGather(iscol,&iscol_local);
277: ISGetIndices(isrow,&irow);
278: ISGetIndices(iscol_local,&icol);
279: ISGetLocalSize(isrow,&nrows);
280: ISGetLocalSize(iscol,&ncols);
281: ISGetSize(iscol,&Ncols); /* global number of columns, size of iscol_local */
283: /* No parallel redistribution currently supported! Should really check each index set
284: to confirm that it is OK. ... Currently supports only submatrices with the same
285: partitioning as the original matrix! */
287: MatGetLocalSize(A,&nlrows,&nlcols);
288: MatGetOwnershipRange(A,&rstart,&rend);
290: /* Check submatrix call */
291: if (scall == MAT_REUSE_MATRIX) {
292: /* SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Reused submatrix wrong size"); */
293: /* Really need to test row and column sizes! */
294: newmat = *B;
295: } else {
296: /* Create and fill new matrix */
297: MatCreate(PetscObjectComm((PetscObject)A),&newmat);
298: MatSetSizes(newmat,nrows,ncols,PETSC_DECIDE,Ncols);
299: MatSetType(newmat,((PetscObject)A)->type_name);
300: MatMPIDenseSetPreallocation(newmat,NULL);
301: }
303: /* Now extract the data pointers and do the copy, column at a time */
304: newmatd = (Mat_MPIDense*)newmat->data;
305: MatDenseGetArray(newmatd->A,&bv);
306: MatDenseGetArrayRead(mat->A,&v);
307: MatDenseGetLDA(mat->A,&lda);
308: for (i=0; i<Ncols; i++) {
309: const PetscScalar *av = v + lda*icol[i];
310: for (j=0; j<nrows; j++) {
311: *bv++ = av[irow[j] - rstart];
312: }
313: }
314: MatDenseRestoreArrayRead(mat->A,&v);
315: MatDenseRestoreArray(newmatd->A,&bv);
317: /* Assemble the matrices so that the correct flags are set */
318: MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);
319: MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);
321: /* Free work space */
322: ISRestoreIndices(isrow,&irow);
323: ISRestoreIndices(iscol_local,&icol);
324: ISDestroy(&iscol_local);
325: *B = newmat;
326: return(0);
327: }
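/* Caller-side sketch (editor's annotation, not part of the original source): since no
   parallel redistribution is supported, each rank selects rows only from its own
   ownership range; here every other locally owned row and all columns (with the original
   column layout) are kept. The name ExampleExtractAlignedSubMatrix is illustrative only. */
static PetscErrorCode ExampleExtractAlignedSubMatrix(Mat A,Mat *sub)
{
  IS       isrow,iscol;
  PetscInt rstart,rend,cstart,cend;

  MatGetOwnershipRange(A,&rstart,&rend);
  MatGetOwnershipRangeColumn(A,&cstart,&cend);
  ISCreateStride(PetscObjectComm((PetscObject)A),(rend-rstart)/2,rstart,2,&isrow); /* every other local row */
  ISCreateStride(PetscObjectComm((PetscObject)A),cend-cstart,cstart,1,&iscol);     /* all columns, original layout */
  MatCreateSubMatrix(A,isrow,iscol,MAT_INITIAL_MATRIX,sub);
  ISDestroy(&isrow);
  ISDestroy(&iscol);
  return(0);
}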
329: PetscErrorCode MatDenseRestoreArray_MPIDense(Mat A,PetscScalar **array)
330: {
331: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
335: MatDenseRestoreArray(a->A,array);
336: return(0);
337: }
339: PetscErrorCode MatDenseRestoreArrayRead_MPIDense(Mat A,const PetscScalar **array)
340: {
341: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
345: MatDenseRestoreArrayRead(a->A,array);
346: return(0);
347: }
349: PetscErrorCode MatDenseRestoreArrayWrite_MPIDense(Mat A,PetscScalar **array)
350: {
351: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
355: MatDenseRestoreArrayWrite(a->A,array);
356: return(0);
357: }
359: PetscErrorCode MatAssemblyBegin_MPIDense(Mat mat,MatAssemblyType mode)
360: {
361: Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
363: PetscInt nstash,reallocs;
366: if (mdn->donotstash || mat->nooffprocentries) return(0);
368: MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
369: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
370: PetscInfo2(mdn->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
371: return(0);
372: }
374: PetscErrorCode MatAssemblyEnd_MPIDense(Mat mat,MatAssemblyType mode)
375: {
376: Mat_MPIDense *mdn=(Mat_MPIDense*)mat->data;
378: PetscInt i,*row,*col,flg,j,rstart,ncols;
379: PetscMPIInt n;
380: PetscScalar *val;
383: if (!mdn->donotstash && !mat->nooffprocentries) {
384: /* wait on receives */
385: while (1) {
386: MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
387: if (!flg) break;
389: for (i=0; i<n;) {
390: /* Now identify the consecutive vals belonging to the same row */
391: for (j=i,rstart=row[j]; j<n; j++) {
392: if (row[j] != rstart) break;
393: }
394: if (j < n) ncols = j-i;
395: else ncols = n-i;
396: /* Now assemble all these values with a single function call */
397: MatSetValues_MPIDense(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);
398: i = j;
399: }
400: }
401: MatStashScatterEnd_Private(&mat->stash);
402: }
404: MatAssemblyBegin(mdn->A,mode);
405: MatAssemblyEnd(mdn->A,mode);
407: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
408: MatSetUpMultiply_MPIDense(mat);
409: }
410: return(0);
411: }
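/* Caller-side sketch (editor's annotation, not part of the original source): entries set
   for rows owned by another process are stashed by MatSetValues_MPIDense and only reach
   their owner during the assembly pair, which drives the receive loop above. The helper
   name is illustrative only. */
static PetscErrorCode ExampleAddPossiblyOffProcessEntry(Mat mat,PetscInt grow,PetscInt gcol,PetscScalar val)
{
  MatSetValues(mat,1,&grow,1,&gcol,&val,ADD_VALUES); /* grow may be owned by another rank */
  MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);          /* sends stashed entries to their owners */
  MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);            /* receives them and calls MatSetValues_MPIDense */
  return(0);
}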
413: PetscErrorCode MatZeroEntries_MPIDense(Mat A)
414: {
416: Mat_MPIDense *l = (Mat_MPIDense*)A->data;
419: MatZeroEntries(l->A);
420: return(0);
421: }
423: PetscErrorCode MatZeroRows_MPIDense(Mat A,PetscInt n,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
424: {
425: Mat_MPIDense *l = (Mat_MPIDense*)A->data;
426: PetscErrorCode ierr;
427: PetscInt i,len,*lrows;
430: /* get locally owned rows */
431: PetscLayoutMapLocal(A->rmap,n,rows,&len,&lrows,NULL);
432: /* fix right hand side if needed */
433: if (x && b) {
434: const PetscScalar *xx;
435: PetscScalar *bb;
437: VecGetArrayRead(x, &xx);
438: VecGetArrayWrite(b, &bb);
439: for (i=0;i<len;++i) bb[lrows[i]] = diag*xx[lrows[i]];
440: VecRestoreArrayRead(x, &xx);
441: VecRestoreArrayWrite(b, &bb);
442: }
443: MatZeroRows(l->A,len,lrows,0.0,NULL,NULL);
444: if (diag != 0.0) {
445: Vec d;
447: MatCreateVecs(A,NULL,&d);
448: VecSet(d,diag);
449: MatDiagonalSet(A,d,INSERT_VALUES);
450: VecDestroy(&d);
451: }
452: PetscFree(lrows);
453: return(0);
454: }
456: PETSC_INTERN PetscErrorCode MatMult_SeqDense(Mat,Vec,Vec);
457: PETSC_INTERN PetscErrorCode MatMultAdd_SeqDense(Mat,Vec,Vec,Vec);
458: PETSC_INTERN PetscErrorCode MatMultTranspose_SeqDense(Mat,Vec,Vec);
459: PETSC_INTERN PetscErrorCode MatMultTransposeAdd_SeqDense(Mat,Vec,Vec,Vec);
461: PetscErrorCode MatMult_MPIDense(Mat mat,Vec xx,Vec yy)
462: {
463: Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
464: PetscErrorCode ierr;
465: const PetscScalar *ax;
466: PetscScalar *ay;
467: PetscMemType axmtype,aymtype;
470: VecGetArrayReadInPlace_Internal(xx,&ax,&axmtype);
471: VecGetArrayInPlace_Internal(mdn->lvec,&ay,&aymtype);
472: PetscSFBcastWithMemTypeBegin(mdn->Mvctx,MPIU_SCALAR,axmtype,ax,aymtype,ay);
473: PetscSFBcastEnd(mdn->Mvctx,MPIU_SCALAR,ax,ay);
474: VecRestoreArrayInPlace(mdn->lvec,&ay);
475: VecRestoreArrayReadInPlace(xx,&ax);
476: (*mdn->A->ops->mult)(mdn->A,mdn->lvec,yy);
477: return(0);
478: }
480: PetscErrorCode MatMultAdd_MPIDense(Mat mat,Vec xx,Vec yy,Vec zz)
481: {
482: Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
483: PetscErrorCode ierr;
484: const PetscScalar *ax;
485: PetscScalar *ay;
486: PetscMemType axmtype,aymtype;
489: VecGetArrayReadInPlace_Internal(xx,&ax,&axmtype);
490: VecGetArrayInPlace_Internal(mdn->lvec,&ay,&aymtype);
491: PetscSFBcastWithMemTypeBegin(mdn->Mvctx,MPIU_SCALAR,axmtype,ax,aymtype,ay);
492: PetscSFBcastEnd(mdn->Mvctx,MPIU_SCALAR,ax,ay);
493: VecRestoreArrayInPlace(mdn->lvec,&ay);
494: VecRestoreArrayReadInPlace(xx,&ax);
495: (*mdn->A->ops->multadd)(mdn->A,mdn->lvec,yy,zz);
496: return(0);
497: }
499: PetscErrorCode MatMultTranspose_MPIDense(Mat A,Vec xx,Vec yy)
500: {
501: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
502: PetscErrorCode ierr;
503: const PetscScalar *ax;
504: PetscScalar *ay;
505: PetscMemType axmtype,aymtype;
508: VecSet(yy,0.0);
509: (*a->A->ops->multtranspose)(a->A,xx,a->lvec);
510: VecGetArrayReadInPlace_Internal(a->lvec,&ax,&axmtype);
511: VecGetArrayInPlace_Internal(yy,&ay,&aymtype);
512: PetscSFReduceWithMemTypeBegin(a->Mvctx,MPIU_SCALAR,axmtype,ax,aymtype,ay,MPIU_SUM);
513: PetscSFReduceEnd(a->Mvctx,MPIU_SCALAR,ax,ay,MPIU_SUM);
514: VecRestoreArrayReadInPlace(a->lvec,&ax);
515: VecRestoreArrayInPlace(yy,&ay);
516: return(0);
517: }
519: PetscErrorCode MatMultTransposeAdd_MPIDense(Mat A,Vec xx,Vec yy,Vec zz)
520: {
521: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
522: PetscErrorCode ierr;
523: const PetscScalar *ax;
524: PetscScalar *ay;
525: PetscMemType axmtype,aymtype;
528: VecCopy(yy,zz);
529: (*a->A->ops->multtranspose)(a->A,xx,a->lvec);
530: VecGetArrayReadInPlace_Internal(a->lvec,&ax,&axmtype);
531: VecGetArrayInPlace_Internal(zz,&ay,&aymtype);
532: PetscSFReduceWithMemTypeBegin(a->Mvctx,MPIU_SCALAR,axmtype,ax,aymtype,ay,MPIU_SUM);
533: PetscSFReduceEnd(a->Mvctx,MPIU_SCALAR,ax,ay,MPIU_SUM);
534: VecRestoreArrayReadInPlace(a->lvec,&ax);
535: VecRestoreArrayInPlace(zz,&ay);
536: return(0);
537: }
539: PetscErrorCode MatGetDiagonal_MPIDense(Mat A,Vec v)
540: {
541: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
542: PetscErrorCode ierr;
543: PetscInt lda,len,i,n,m = A->rmap->n,radd;
544: PetscScalar *x,zero = 0.0;
545: const PetscScalar *av;
548: VecSet(v,zero);
549: VecGetArray(v,&x);
550: VecGetSize(v,&n);
551: if (n != A->rmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Nonconforming mat and vec");
552: len = PetscMin(a->A->rmap->n,a->A->cmap->n);
553: radd = A->rmap->rstart*m;
554: MatDenseGetArrayRead(a->A,&av);
555: MatDenseGetLDA(a->A,&lda);
556: for (i=0; i<len; i++) {
557: x[i] = av[radd + i*lda + i];
558: }
559: MatDenseRestoreArrayRead(a->A,&av);
560: VecRestoreArray(v,&x);
561: return(0);
562: }
564: PetscErrorCode MatDestroy_MPIDense(Mat mat)
565: {
566: Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
570: #if defined(PETSC_USE_LOG)
571: PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
572: #endif
573: MatStashDestroy_Private(&mat->stash);
574: if (mdn->vecinuse) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
575: if (mdn->matinuse) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
576: MatDestroy(&mdn->A);
577: VecDestroy(&mdn->lvec);
578: PetscSFDestroy(&mdn->Mvctx);
579: VecDestroy(&mdn->cvec);
580: MatDestroy(&mdn->cmat);
582: PetscFree(mat->data);
583: PetscObjectChangeTypeName((PetscObject)mat,NULL);
585: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetLDA_C",NULL);
586: PetscObjectComposeFunction((PetscObject)mat,"MatDenseSetLDA_C",NULL);
587: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetArray_C",NULL);
588: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreArray_C",NULL);
589: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetArrayRead_C",NULL);
590: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreArrayRead_C",NULL);
591: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetArrayWrite_C",NULL);
592: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreArrayWrite_C",NULL);
593: PetscObjectComposeFunction((PetscObject)mat,"MatDensePlaceArray_C",NULL);
594: PetscObjectComposeFunction((PetscObject)mat,"MatDenseResetArray_C",NULL);
595: PetscObjectComposeFunction((PetscObject)mat,"MatDenseReplaceArray_C",NULL);
596: #if defined(PETSC_HAVE_ELEMENTAL)
597: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpidense_elemental_C",NULL);
598: #endif
599: #if defined(PETSC_HAVE_SCALAPACK)
600: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpidense_scalapack_C",NULL);
601: #endif
602: PetscObjectComposeFunction((PetscObject)mat,"MatMPIDenseSetPreallocation_C",NULL);
603: PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_mpiaij_mpidense_C",NULL);
604: PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_mpidense_mpiaij_C",NULL);
605: #if defined (PETSC_HAVE_CUDA)
606: PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_mpiaijcusparse_mpidense_C",NULL);
607: PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_mpidense_mpiaijcusparse_C",NULL);
608: #endif
609: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetColumn_C",NULL);
610: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreColumn_C",NULL);
611: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetColumnVec_C",NULL);
612: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreColumnVec_C",NULL);
613: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetColumnVecRead_C",NULL);
614: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreColumnVecRead_C",NULL);
615: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetColumnVecWrite_C",NULL);
616: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreColumnVecWrite_C",NULL);
617: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetSubMatrix_C",NULL);
618: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreSubMatrix_C",NULL);
619: return(0);
620: }
622: PETSC_INTERN PetscErrorCode MatView_SeqDense(Mat,PetscViewer);
624: #include <petscdraw.h>
625: static PetscErrorCode MatView_MPIDense_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
626: {
627: Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
628: PetscErrorCode ierr;
629: PetscMPIInt rank;
630: PetscViewerType vtype;
631: PetscBool iascii,isdraw;
632: PetscViewer sviewer;
633: PetscViewerFormat format;
636: MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
637: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
638: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
639: if (iascii) {
640: PetscViewerGetType(viewer,&vtype);
641: PetscViewerGetFormat(viewer,&format);
642: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
643: MatInfo info;
644: MatGetInfo(mat,MAT_LOCAL,&info);
645: PetscViewerASCIIPushSynchronized(viewer);
646: PetscViewerASCIISynchronizedPrintf(viewer," [%d] local rows %D nz %D nz alloced %D mem %D \n",rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);
647: PetscViewerFlush(viewer);
648: PetscViewerASCIIPopSynchronized(viewer);
649: PetscSFView(mdn->Mvctx,viewer);
650: return(0);
651: } else if (format == PETSC_VIEWER_ASCII_INFO) {
652: return(0);
653: }
654: } else if (isdraw) {
655: PetscDraw draw;
656: PetscBool isnull;
658: PetscViewerDrawGetDraw(viewer,0,&draw);
659: PetscDrawIsNull(draw,&isnull);
660: if (isnull) return(0);
661: }
663: {
664: /* assemble the entire matrix onto the first processor */
665: Mat A;
666: PetscInt M = mat->rmap->N,N = mat->cmap->N,m,row,i,nz;
667: PetscInt *cols;
668: PetscScalar *vals;
670: MatCreate(PetscObjectComm((PetscObject)mat),&A);
671: if (!rank) {
672: MatSetSizes(A,M,N,M,N);
673: } else {
674: MatSetSizes(A,0,0,M,N);
675: }
676: /* Since this is a temporary matrix, MATMPIDENSE instead of ((PetscObject)A)->type_name here is probably acceptable. */
677: MatSetType(A,MATMPIDENSE);
678: MatMPIDenseSetPreallocation(A,NULL);
679: PetscLogObjectParent((PetscObject)mat,(PetscObject)A);
681: /* Copy the matrix ... This isn't the most efficient means,
682: but it's quick for now */
683: A->insertmode = INSERT_VALUES;
685: row = mat->rmap->rstart;
686: m = mdn->A->rmap->n;
687: for (i=0; i<m; i++) {
688: MatGetRow_MPIDense(mat,row,&nz,&cols,&vals);
689: MatSetValues_MPIDense(A,1,&row,nz,cols,vals,INSERT_VALUES);
690: MatRestoreRow_MPIDense(mat,row,&nz,&cols,&vals);
691: row++;
692: }
694: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
695: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
696: PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
697: if (!rank) {
698: PetscObjectSetName((PetscObject)((Mat_MPIDense*)(A->data))->A,((PetscObject)mat)->name);
699: MatView_SeqDense(((Mat_MPIDense*)(A->data))->A,sviewer);
700: }
701: PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
702: PetscViewerFlush(viewer);
703: MatDestroy(&A);
704: }
705: return(0);
706: }
708: PetscErrorCode MatView_MPIDense(Mat mat,PetscViewer viewer)
709: {
711: PetscBool iascii,isbinary,isdraw,issocket;
714: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
715: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
716: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
717: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
719: if (iascii || issocket || isdraw) {
720: MatView_MPIDense_ASCIIorDraworSocket(mat,viewer);
721: } else if (isbinary) {
722: MatView_Dense_Binary(mat,viewer);
723: }
724: return(0);
725: }
727: PetscErrorCode MatGetInfo_MPIDense(Mat A,MatInfoType flag,MatInfo *info)
728: {
729: Mat_MPIDense *mat = (Mat_MPIDense*)A->data;
730: Mat mdn = mat->A;
732: PetscLogDouble isend[5],irecv[5];
735: info->block_size = 1.0;
737: MatGetInfo(mdn,MAT_LOCAL,info);
739: isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
740: isend[3] = info->memory; isend[4] = info->mallocs;
741: if (flag == MAT_LOCAL) {
742: info->nz_used = isend[0];
743: info->nz_allocated = isend[1];
744: info->nz_unneeded = isend[2];
745: info->memory = isend[3];
746: info->mallocs = isend[4];
747: } else if (flag == MAT_GLOBAL_MAX) {
748: MPIU_Allreduce(isend,irecv,5,MPIU_PETSCLOGDOUBLE,MPI_MAX,PetscObjectComm((PetscObject)A));
750: info->nz_used = irecv[0];
751: info->nz_allocated = irecv[1];
752: info->nz_unneeded = irecv[2];
753: info->memory = irecv[3];
754: info->mallocs = irecv[4];
755: } else if (flag == MAT_GLOBAL_SUM) {
756: MPIU_Allreduce(isend,irecv,5,MPIU_PETSCLOGDOUBLE,MPI_SUM,PetscObjectComm((PetscObject)A));
758: info->nz_used = irecv[0];
759: info->nz_allocated = irecv[1];
760: info->nz_unneeded = irecv[2];
761: info->memory = irecv[3];
762: info->mallocs = irecv[4];
763: }
764: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
765: info->fill_ratio_needed = 0;
766: info->factor_mallocs = 0;
767: return(0);
768: }
770: PetscErrorCode MatSetOption_MPIDense(Mat A,MatOption op,PetscBool flg)
771: {
772: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
776: switch (op) {
777: case MAT_NEW_NONZERO_LOCATIONS:
778: case MAT_NEW_NONZERO_LOCATION_ERR:
779: case MAT_NEW_NONZERO_ALLOCATION_ERR:
780: MatCheckPreallocated(A,1);
781: MatSetOption(a->A,op,flg);
782: break;
783: case MAT_ROW_ORIENTED:
784: MatCheckPreallocated(A,1);
785: a->roworiented = flg;
786: MatSetOption(a->A,op,flg);
787: break;
788: case MAT_NEW_DIAGONALS:
789: case MAT_KEEP_NONZERO_PATTERN:
790: case MAT_USE_HASH_TABLE:
791: case MAT_SORTED_FULL:
792: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
793: break;
794: case MAT_IGNORE_OFF_PROC_ENTRIES:
795: a->donotstash = flg;
796: break;
797: case MAT_SYMMETRIC:
798: case MAT_STRUCTURALLY_SYMMETRIC:
799: case MAT_HERMITIAN:
800: case MAT_SYMMETRY_ETERNAL:
801: case MAT_IGNORE_LOWER_TRIANGULAR:
802: case MAT_IGNORE_ZERO_ENTRIES:
803: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
804: break;
805: default:
806: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %s",MatOptions[op]);
807: }
808: return(0);
809: }
811: PetscErrorCode MatDiagonalScale_MPIDense(Mat A,Vec ll,Vec rr)
812: {
813: Mat_MPIDense *mdn = (Mat_MPIDense*)A->data;
814: const PetscScalar *l;
815: PetscScalar x,*v,*vv,*r;
816: PetscErrorCode ierr;
817: PetscInt i,j,s2a,s3a,s2,s3,m=mdn->A->rmap->n,n=mdn->A->cmap->n,lda;
820: MatDenseGetArray(mdn->A,&vv);
821: MatDenseGetLDA(mdn->A,&lda);
822: MatGetLocalSize(A,&s2,&s3);
823: if (ll) {
824: VecGetLocalSize(ll,&s2a);
825: if (s2a != s2) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Left scaling vector non-conforming local size, %D != %D", s2a, s2);
826: VecGetArrayRead(ll,&l);
827: for (i=0; i<m; i++) {
828: x = l[i];
829: v = vv + i;
830: for (j=0; j<n; j++) { (*v) *= x; v+= lda;}
831: }
832: VecRestoreArrayRead(ll,&l);
833: PetscLogFlops(1.0*n*m);
834: }
835: if (rr) {
836: const PetscScalar *ar;
838: VecGetLocalSize(rr,&s3a);
839: if (s3a != s3) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Right scaling vector non-conforming local size, %D != %D", s3a, s3);
840: VecGetArrayRead(rr,&ar);
841: VecGetArray(mdn->lvec,&r);
842: PetscSFBcastBegin(mdn->Mvctx,MPIU_SCALAR,ar,r);
843: PetscSFBcastEnd(mdn->Mvctx,MPIU_SCALAR,ar,r);
844: VecRestoreArrayRead(rr,&ar);
845: for (i=0; i<n; i++) {
846: x = r[i];
847: v = vv + i*lda;
848: for (j=0; j<m; j++) (*v++) *= x;
849: }
850: VecRestoreArray(mdn->lvec,&r);
851: PetscLogFlops(1.0*n*m);
852: }
853: MatDenseRestoreArray(mdn->A,&vv);
854: return(0);
855: }
857: PetscErrorCode MatNorm_MPIDense(Mat A,NormType type,PetscReal *nrm)
858: {
859: Mat_MPIDense *mdn = (Mat_MPIDense*)A->data;
860: PetscErrorCode ierr;
861: PetscInt i,j;
862: PetscMPIInt size;
863: PetscReal sum = 0.0;
864: const PetscScalar *av,*v;
867: MatDenseGetArrayRead(mdn->A,&av);
868: v = av;
869: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
870: if (size == 1) {
871: MatNorm(mdn->A,type,nrm);
872: } else {
873: if (type == NORM_FROBENIUS) {
874: for (i=0; i<mdn->A->cmap->n*mdn->A->rmap->n; i++) {
875: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
876: }
877: MPIU_Allreduce(&sum,nrm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
878: *nrm = PetscSqrtReal(*nrm);
879: PetscLogFlops(2.0*mdn->A->cmap->n*mdn->A->rmap->n);
880: } else if (type == NORM_1) {
881: PetscReal *tmp,*tmp2;
882: PetscCalloc2(A->cmap->N,&tmp,A->cmap->N,&tmp2);
883: *nrm = 0.0;
884: v = av;
885: for (j=0; j<mdn->A->cmap->n; j++) {
886: for (i=0; i<mdn->A->rmap->n; i++) {
887: tmp[j] += PetscAbsScalar(*v); v++;
888: }
889: }
890: MPIU_Allreduce(tmp,tmp2,A->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
891: for (j=0; j<A->cmap->N; j++) {
892: if (tmp2[j] > *nrm) *nrm = tmp2[j];
893: }
894: PetscFree2(tmp,tmp2);
895: PetscLogFlops(A->cmap->n*A->rmap->n);
896: } else if (type == NORM_INFINITY) { /* max row norm */
897: PetscReal ntemp;
898: MatNorm(mdn->A,type,&ntemp);
899: MPIU_Allreduce(&ntemp,nrm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));
900: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"No support for two norm");
901: }
902: MatDenseRestoreArrayRead(mdn->A,&av);
903: return(0);
904: }
906: PetscErrorCode MatTranspose_MPIDense(Mat A,MatReuse reuse,Mat *matout)
907: {
908: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
909: Mat B;
910: PetscInt M = A->rmap->N,N = A->cmap->N,m,n,*rwork,rstart = A->rmap->rstart;
912: PetscInt j,i,lda;
913: PetscScalar *v;
916: if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_INPLACE_MATRIX) {
917: MatCreate(PetscObjectComm((PetscObject)A),&B);
918: MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
919: MatSetType(B,((PetscObject)A)->type_name);
920: MatMPIDenseSetPreallocation(B,NULL);
921: } else B = *matout;
923: m = a->A->rmap->n; n = a->A->cmap->n;
924: MatDenseGetArrayRead(a->A,(const PetscScalar**)&v);
925: MatDenseGetLDA(a->A,&lda);
926: PetscMalloc1(m,&rwork);
927: for (i=0; i<m; i++) rwork[i] = rstart + i;
928: for (j=0; j<n; j++) {
929: MatSetValues(B,1,&j,m,rwork,v,INSERT_VALUES);
930: v += lda;
931: }
932: MatDenseRestoreArrayRead(a->A,(const PetscScalar**)&v);
933: PetscFree(rwork);
934: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
935: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
936: if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
937: *matout = B;
938: } else {
939: MatHeaderMerge(A,&B);
940: }
941: return(0);
942: }
944: static PetscErrorCode MatDuplicate_MPIDense(Mat,MatDuplicateOption,Mat*);
945: PETSC_INTERN PetscErrorCode MatScale_MPIDense(Mat,PetscScalar);
947: PetscErrorCode MatSetUp_MPIDense(Mat A)
948: {
952: PetscLayoutSetUp(A->rmap);
953: PetscLayoutSetUp(A->cmap);
954: if (!A->preallocated) {
955: MatMPIDenseSetPreallocation(A,NULL);
956: }
957: return(0);
958: }
960: PetscErrorCode MatAXPY_MPIDense(Mat Y,PetscScalar alpha,Mat X,MatStructure str)
961: {
963: Mat_MPIDense *A = (Mat_MPIDense*)Y->data, *B = (Mat_MPIDense*)X->data;
966: MatAXPY(A->A,alpha,B->A,str);
967: return(0);
968: }
970: PetscErrorCode MatConjugate_MPIDense(Mat mat)
971: {
972: Mat_MPIDense *a = (Mat_MPIDense*)mat->data;
976: MatConjugate(a->A);
977: return(0);
978: }
980: PetscErrorCode MatRealPart_MPIDense(Mat A)
981: {
982: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
986: MatRealPart(a->A);
987: return(0);
988: }
990: PetscErrorCode MatImaginaryPart_MPIDense(Mat A)
991: {
992: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
996: MatImaginaryPart(a->A);
997: return(0);
998: }
1000: static PetscErrorCode MatGetColumnVector_MPIDense(Mat A,Vec v,PetscInt col)
1001: {
1003: Mat_MPIDense *a = (Mat_MPIDense*) A->data;
1006: if (!a->A) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Missing local matrix");
1007: if (!a->A->ops->getcolumnvector) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Missing get column operation");
1008: (*a->A->ops->getcolumnvector)(a->A,v,col);
1009: return(0);
1010: }
1012: PETSC_INTERN PetscErrorCode MatGetColumnNorms_SeqDense(Mat,NormType,PetscReal*);
1014: PetscErrorCode MatGetColumnNorms_MPIDense(Mat A,NormType type,PetscReal *norms)
1015: {
1017: PetscInt i,n;
1018: Mat_MPIDense *a = (Mat_MPIDense*) A->data;
1019: PetscReal *work;
1022: MatGetSize(A,NULL,&n);
1023: PetscMalloc1(n,&work);
1024: MatGetColumnNorms_SeqDense(a->A,type,work);
1025: if (type == NORM_2) {
1026: for (i=0; i<n; i++) work[i] *= work[i];
1027: }
1028: if (type == NORM_INFINITY) {
1029: MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_MAX,A->hdr.comm);
1030: } else {
1031: MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_SUM,A->hdr.comm);
1032: }
1033: PetscFree(work);
1034: if (type == NORM_2) {
1035: for (i=0; i<n; i++) norms[i] = PetscSqrtReal(norms[i]);
1036: }
1037: return(0);
1038: }
1040: #if defined(PETSC_HAVE_CUDA)
1041: static PetscErrorCode MatDenseGetColumnVec_MPIDenseCUDA(Mat A,PetscInt col,Vec *v)
1042: {
1043: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1045: PetscInt lda;
1048: if (a->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
1049: if (a->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
1050: if (!a->cvec) {
1051: VecCreateMPICUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,A->rmap->N,NULL,&a->cvec);
1052: PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);
1053: }
1054: a->vecinuse = col + 1;
1055: MatDenseGetLDA(a->A,&lda);
1056: MatDenseCUDAGetArray(a->A,(PetscScalar**)&a->ptrinuse);
1057: VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)lda);
1058: *v = a->cvec;
1059: return(0);
1060: }
1062: static PetscErrorCode MatDenseRestoreColumnVec_MPIDenseCUDA(Mat A,PetscInt col,Vec *v)
1063: {
1064: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1068: if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first");
1069: if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector");
1070: a->vecinuse = 0;
1071: MatDenseCUDARestoreArray(a->A,(PetscScalar**)&a->ptrinuse);
1072: VecCUDAResetArray(a->cvec);
1073: *v = NULL;
1074: return(0);
1075: }
1077: static PetscErrorCode MatDenseGetColumnVecRead_MPIDenseCUDA(Mat A,PetscInt col,Vec *v)
1078: {
1079: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1080: PetscInt lda;
1084: if (a->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
1085: if (a->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
1086: if (!a->cvec) {
1087: VecCreateMPICUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,A->rmap->N,NULL,&a->cvec);
1088: PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);
1089: }
1090: a->vecinuse = col + 1;
1091: MatDenseGetLDA(a->A,&lda);
1092: MatDenseCUDAGetArrayRead(a->A,&a->ptrinuse);
1093: VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)lda);
1094: VecLockReadPush(a->cvec);
1095: *v = a->cvec;
1096: return(0);
1097: }
1099: static PetscErrorCode MatDenseRestoreColumnVecRead_MPIDenseCUDA(Mat A,PetscInt col,Vec *v)
1100: {
1101: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1105: if (!a->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first");
1106: if (!a->cvec) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing internal column vector");
1107: a->vecinuse = 0;
1108: MatDenseCUDARestoreArrayRead(a->A,&a->ptrinuse);
1109: VecLockReadPop(a->cvec);
1110: VecCUDAResetArray(a->cvec);
1111: *v = NULL;
1112: return(0);
1113: }
1115: static PetscErrorCode MatDenseGetColumnVecWrite_MPIDenseCUDA(Mat A,PetscInt col,Vec *v)
1116: {
1117: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1119: PetscInt lda;
1122: if (a->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
1123: if (a->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
1124: if (!a->cvec) {
1125: VecCreateMPICUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,A->rmap->N,NULL,&a->cvec);
1126: PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);
1127: }
1128: a->vecinuse = col + 1;
1129: MatDenseGetLDA(a->A,&lda);
1130: MatDenseCUDAGetArrayWrite(a->A,(PetscScalar**)&a->ptrinuse);
1131: VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)lda);
1132: *v = a->cvec;
1133: return(0);
1134: }
1136: static PetscErrorCode MatDenseRestoreColumnVecWrite_MPIDenseCUDA(Mat A,PetscInt col,Vec *v)
1137: {
1138: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1142: if (!a->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first");
1143: if (!a->cvec) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing internal column vector");
1144: a->vecinuse = 0;
1145: MatDenseCUDARestoreArrayWrite(a->A,(PetscScalar**)&a->ptrinuse);
1146: VecCUDAResetArray(a->cvec);
1147: *v = NULL;
1148: return(0);
1149: }
1151: static PetscErrorCode MatDenseCUDAPlaceArray_MPIDenseCUDA(Mat A, const PetscScalar *a)
1152: {
1153: Mat_MPIDense *l = (Mat_MPIDense*) A->data;
1157: if (l->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
1158: if (l->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
1159: MatDenseCUDAPlaceArray(l->A,a);
1160: return(0);
1161: }
1163: static PetscErrorCode MatDenseCUDAResetArray_MPIDenseCUDA(Mat A)
1164: {
1165: Mat_MPIDense *l = (Mat_MPIDense*) A->data;
1169: if (l->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
1170: if (l->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
1171: MatDenseCUDAResetArray(l->A);
1172: return(0);
1173: }
1175: static PetscErrorCode MatDenseCUDAReplaceArray_MPIDenseCUDA(Mat A, const PetscScalar *a)
1176: {
1177: Mat_MPIDense *l = (Mat_MPIDense*) A->data;
1181: if (l->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
1182: if (l->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
1183: MatDenseCUDAReplaceArray(l->A,a);
1184: return(0);
1185: }
1187: static PetscErrorCode MatDenseCUDAGetArrayWrite_MPIDenseCUDA(Mat A, PetscScalar **a)
1188: {
1189: Mat_MPIDense *l = (Mat_MPIDense*) A->data;
1193: MatDenseCUDAGetArrayWrite(l->A,a);
1194: return(0);
1195: }
1197: static PetscErrorCode MatDenseCUDARestoreArrayWrite_MPIDenseCUDA(Mat A, PetscScalar **a)
1198: {
1199: Mat_MPIDense *l = (Mat_MPIDense*) A->data;
1203: MatDenseCUDARestoreArrayWrite(l->A,a);
1204: return(0);
1205: }
1207: static PetscErrorCode MatDenseCUDAGetArrayRead_MPIDenseCUDA(Mat A, const PetscScalar **a)
1208: {
1209: Mat_MPIDense *l = (Mat_MPIDense*) A->data;
1213: MatDenseCUDAGetArrayRead(l->A,a);
1214: return(0);
1215: }
1217: static PetscErrorCode MatDenseCUDARestoreArrayRead_MPIDenseCUDA(Mat A, const PetscScalar **a)
1218: {
1219: Mat_MPIDense *l = (Mat_MPIDense*) A->data;
1223: MatDenseCUDARestoreArrayRead(l->A,a);
1224: return(0);
1225: }
1227: static PetscErrorCode MatDenseCUDAGetArray_MPIDenseCUDA(Mat A, PetscScalar **a)
1228: {
1229: Mat_MPIDense *l = (Mat_MPIDense*) A->data;
1233: MatDenseCUDAGetArray(l->A,a);
1234: return(0);
1235: }
1237: static PetscErrorCode MatDenseCUDARestoreArray_MPIDenseCUDA(Mat A, PetscScalar **a)
1238: {
1239: Mat_MPIDense *l = (Mat_MPIDense*) A->data;
1243: MatDenseCUDARestoreArray(l->A,a);
1244: return(0);
1245: }
1247: static PetscErrorCode MatDenseGetColumnVecWrite_MPIDense(Mat,PetscInt,Vec*);
1248: static PetscErrorCode MatDenseGetColumnVecRead_MPIDense(Mat,PetscInt,Vec*);
1249: static PetscErrorCode MatDenseGetColumnVec_MPIDense(Mat,PetscInt,Vec*);
1250: static PetscErrorCode MatDenseRestoreColumnVecWrite_MPIDense(Mat,PetscInt,Vec*);
1251: static PetscErrorCode MatDenseRestoreColumnVecRead_MPIDense(Mat,PetscInt,Vec*);
1252: static PetscErrorCode MatDenseRestoreColumnVec_MPIDense(Mat,PetscInt,Vec*);
1253: static PetscErrorCode MatDenseRestoreSubMatrix_MPIDense(Mat,Mat*);
1255: static PetscErrorCode MatBindToCPU_MPIDenseCUDA(Mat mat,PetscBool bind)
1256: {
1257: Mat_MPIDense *d = (Mat_MPIDense*)mat->data;
1261: if (d->vecinuse) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
1262: if (d->matinuse) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
1263: if (d->A) {
1264: MatBindToCPU(d->A,bind);
1265: }
1266: mat->boundtocpu = bind;
1267: if (!bind) {
1268: PetscBool iscuda;
1270: PetscObjectTypeCompare((PetscObject)d->cvec,VECMPICUDA,&iscuda);
1271: if (!iscuda) {
1272: VecDestroy(&d->cvec);
1273: }
1274: PetscObjectTypeCompare((PetscObject)d->cmat,MATMPIDENSECUDA,&iscuda);
1275: if (!iscuda) {
1276: MatDestroy(&d->cmat);
1277: }
1278: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_MPIDenseCUDA);
1279: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_MPIDenseCUDA);
1280: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_MPIDenseCUDA);
1281: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_MPIDenseCUDA);
1282: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_MPIDenseCUDA);
1283: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_MPIDenseCUDA);
1284: } else {
1285: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_MPIDense);
1286: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_MPIDense);
1287: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_MPIDense);
1288: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_MPIDense);
1289: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_MPIDense);
1290: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_MPIDense);
1291: }
1292: if (d->cmat) {
1293: MatBindToCPU(d->cmat,bind);
1294: }
1295: return(0);
1296: }
1298: PetscErrorCode MatMPIDenseCUDASetPreallocation(Mat A, PetscScalar *d_data)
1299: {
1300: Mat_MPIDense *d = (Mat_MPIDense*)A->data;
1302: PetscBool iscuda;
1306: PetscObjectTypeCompare((PetscObject)A,MATMPIDENSECUDA,&iscuda);
1307: if (!iscuda) return(0);
1308: PetscLayoutSetUp(A->rmap);
1309: PetscLayoutSetUp(A->cmap);
1310: if (!d->A) {
1311: MatCreate(PETSC_COMM_SELF,&d->A);
1312: PetscLogObjectParent((PetscObject)A,(PetscObject)d->A);
1313: MatSetSizes(d->A,A->rmap->n,A->cmap->N,A->rmap->n,A->cmap->N);
1314: }
1315: MatSetType(d->A,MATSEQDENSECUDA);
1316: MatSeqDenseCUDASetPreallocation(d->A,d_data);
1317: A->preallocated = PETSC_TRUE;
1318: return(0);
1319: }
1320: #endif
1322: static PetscErrorCode MatSetRandom_MPIDense(Mat x,PetscRandom rctx)
1323: {
1324: Mat_MPIDense *d = (Mat_MPIDense*)x->data;
1328: MatSetRandom(d->A,rctx);
1329: return(0);
1330: }
1332: static PetscErrorCode MatMissingDiagonal_MPIDense(Mat A,PetscBool *missing,PetscInt *d)
1333: {
1335: *missing = PETSC_FALSE;
1336: return(0);
1337: }
1339: static PetscErrorCode MatMatTransposeMultSymbolic_MPIDense_MPIDense(Mat,Mat,PetscReal,Mat);
1340: static PetscErrorCode MatMatTransposeMultNumeric_MPIDense_MPIDense(Mat,Mat,Mat);
1341: static PetscErrorCode MatTransposeMatMultNumeric_MPIDense_MPIDense(Mat,Mat,Mat);
1342: static PetscErrorCode MatTransposeMatMultSymbolic_MPIDense_MPIDense(Mat,Mat,PetscReal,Mat);
1343: static PetscErrorCode MatEqual_MPIDense(Mat,Mat,PetscBool*);
1344: static PetscErrorCode MatLoad_MPIDense(Mat,PetscViewer);
1346: /* -------------------------------------------------------------------*/
1347: static struct _MatOps MatOps_Values = { MatSetValues_MPIDense,
1348: MatGetRow_MPIDense,
1349: MatRestoreRow_MPIDense,
1350: MatMult_MPIDense,
1351: /* 4*/ MatMultAdd_MPIDense,
1352: MatMultTranspose_MPIDense,
1353: MatMultTransposeAdd_MPIDense,
1354: NULL,
1355: NULL,
1356: NULL,
1357: /* 10*/ NULL,
1358: NULL,
1359: NULL,
1360: NULL,
1361: MatTranspose_MPIDense,
1362: /* 15*/ MatGetInfo_MPIDense,
1363: MatEqual_MPIDense,
1364: MatGetDiagonal_MPIDense,
1365: MatDiagonalScale_MPIDense,
1366: MatNorm_MPIDense,
1367: /* 20*/ MatAssemblyBegin_MPIDense,
1368: MatAssemblyEnd_MPIDense,
1369: MatSetOption_MPIDense,
1370: MatZeroEntries_MPIDense,
1371: /* 24*/ MatZeroRows_MPIDense,
1372: NULL,
1373: NULL,
1374: NULL,
1375: NULL,
1376: /* 29*/ MatSetUp_MPIDense,
1377: NULL,
1378: NULL,
1379: MatGetDiagonalBlock_MPIDense,
1380: NULL,
1381: /* 34*/ MatDuplicate_MPIDense,
1382: NULL,
1383: NULL,
1384: NULL,
1385: NULL,
1386: /* 39*/ MatAXPY_MPIDense,
1387: MatCreateSubMatrices_MPIDense,
1388: NULL,
1389: MatGetValues_MPIDense,
1390: NULL,
1391: /* 44*/ NULL,
1392: MatScale_MPIDense,
1393: MatShift_Basic,
1394: NULL,
1395: NULL,
1396: /* 49*/ MatSetRandom_MPIDense,
1397: NULL,
1398: NULL,
1399: NULL,
1400: NULL,
1401: /* 54*/ NULL,
1402: NULL,
1403: NULL,
1404: NULL,
1405: NULL,
1406: /* 59*/ MatCreateSubMatrix_MPIDense,
1407: MatDestroy_MPIDense,
1408: MatView_MPIDense,
1409: NULL,
1410: NULL,
1411: /* 64*/ NULL,
1412: NULL,
1413: NULL,
1414: NULL,
1415: NULL,
1416: /* 69*/ NULL,
1417: NULL,
1418: NULL,
1419: NULL,
1420: NULL,
1421: /* 74*/ NULL,
1422: NULL,
1423: NULL,
1424: NULL,
1425: NULL,
1426: /* 79*/ NULL,
1427: NULL,
1428: NULL,
1429: NULL,
1430: /* 83*/ MatLoad_MPIDense,
1431: NULL,
1432: NULL,
1433: NULL,
1434: NULL,
1435: NULL,
1436: /* 89*/ NULL,
1437: NULL,
1438: NULL,
1439: NULL,
1440: NULL,
1441: /* 94*/ NULL,
1442: NULL,
1443: MatMatTransposeMultSymbolic_MPIDense_MPIDense,
1444: MatMatTransposeMultNumeric_MPIDense_MPIDense,
1445: NULL,
1446: /* 99*/ MatProductSetFromOptions_MPIDense,
1447: NULL,
1448: NULL,
1449: MatConjugate_MPIDense,
1450: NULL,
1451: /*104*/ NULL,
1452: MatRealPart_MPIDense,
1453: MatImaginaryPart_MPIDense,
1454: NULL,
1455: NULL,
1456: /*109*/ NULL,
1457: NULL,
1458: NULL,
1459: MatGetColumnVector_MPIDense,
1460: MatMissingDiagonal_MPIDense,
1461: /*114*/ NULL,
1462: NULL,
1463: NULL,
1464: NULL,
1465: NULL,
1466: /*119*/ NULL,
1467: NULL,
1468: NULL,
1469: NULL,
1470: NULL,
1471: /*124*/ NULL,
1472: MatGetColumnNorms_MPIDense,
1473: NULL,
1474: NULL,
1475: NULL,
1476: /*129*/ NULL,
1477: NULL,
1478: MatTransposeMatMultSymbolic_MPIDense_MPIDense,
1479: MatTransposeMatMultNumeric_MPIDense_MPIDense,
1480: NULL,
1481: /*134*/ NULL,
1482: NULL,
1483: NULL,
1484: NULL,
1485: NULL,
1486: /*139*/ NULL,
1487: NULL,
1488: NULL,
1489: NULL,
1490: NULL,
1491: MatCreateMPIMatConcatenateSeqMat_MPIDense,
1492: /*145*/ NULL,
1493: NULL,
1494: NULL
1495: };
1497: PetscErrorCode MatMPIDenseSetPreallocation_MPIDense(Mat mat,PetscScalar *data)
1498: {
1499: Mat_MPIDense *a = (Mat_MPIDense*)mat->data;
1500: PetscBool iscuda;
1504: if (a->matinuse) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
1505: PetscLayoutSetUp(mat->rmap);
1506: PetscLayoutSetUp(mat->cmap);
1507: if (!a->A) {
1508: MatCreate(PETSC_COMM_SELF,&a->A);
1509: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
1510: MatSetSizes(a->A,mat->rmap->n,mat->cmap->N,mat->rmap->n,mat->cmap->N);
1511: }
1512: PetscObjectTypeCompare((PetscObject)mat,MATMPIDENSECUDA,&iscuda);
1513: MatSetType(a->A,iscuda ? MATSEQDENSECUDA : MATSEQDENSE);
1514: MatSeqDenseSetPreallocation(a->A,data);
1515: mat->preallocated = PETSC_TRUE;
1516: return(0);
1517: }
1519: #if defined(PETSC_HAVE_ELEMENTAL)
1520: PETSC_INTERN PetscErrorCode MatConvert_MPIDense_Elemental(Mat A, MatType newtype,MatReuse reuse,Mat *newmat)
1521: {
1522: Mat mat_elemental;
1524: PetscScalar *v;
1525: PetscInt m=A->rmap->n,N=A->cmap->N,rstart=A->rmap->rstart,i,*rows,*cols;
1528: if (reuse == MAT_REUSE_MATRIX) {
1529: mat_elemental = *newmat;
1530: MatZeroEntries(*newmat);
1531: } else {
1532: MatCreate(PetscObjectComm((PetscObject)A), &mat_elemental);
1533: MatSetSizes(mat_elemental,PETSC_DECIDE,PETSC_DECIDE,A->rmap->N,A->cmap->N);
1534: MatSetType(mat_elemental,MATELEMENTAL);
1535: MatSetUp(mat_elemental);
1536: MatSetOption(mat_elemental,MAT_ROW_ORIENTED,PETSC_FALSE);
1537: }
1539: PetscMalloc2(m,&rows,N,&cols);
1540: for (i=0; i<N; i++) cols[i] = i;
1541: for (i=0; i<m; i++) rows[i] = rstart + i;
1543: /* The PETSc-Elemental interface uses axpy for setting off-processor entries, so only ADD_VALUES is allowed */
1544: MatDenseGetArray(A,&v);
1545: MatSetValues(mat_elemental,m,rows,N,cols,v,ADD_VALUES);
1546: MatAssemblyBegin(mat_elemental, MAT_FINAL_ASSEMBLY);
1547: MatAssemblyEnd(mat_elemental, MAT_FINAL_ASSEMBLY);
1548: MatDenseRestoreArray(A,&v);
1549: PetscFree2(rows,cols);
1551: if (reuse == MAT_INPLACE_MATRIX) {
1552: MatHeaderReplace(A,&mat_elemental);
1553: } else {
1554: *newmat = mat_elemental;
1555: }
1556: return(0);
1557: }
1558: #endif
1560: static PetscErrorCode MatDenseGetColumn_MPIDense(Mat A,PetscInt col,PetscScalar **vals)
1561: {
1562: Mat_MPIDense *mat = (Mat_MPIDense*)A->data;
1566: MatDenseGetColumn(mat->A,col,vals);
1567: return(0);
1568: }
1570: static PetscErrorCode MatDenseRestoreColumn_MPIDense(Mat A,PetscScalar **vals)
1571: {
1572: Mat_MPIDense *mat = (Mat_MPIDense*)A->data;
1576: MatDenseRestoreColumn(mat->A,vals);
1577: return(0);
1578: }
1580: PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIDense(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
1581: {
1583: Mat_MPIDense *mat;
1584: PetscInt m,nloc,N;
1587: MatGetSize(inmat,&m,&N);
1588: MatGetLocalSize(inmat,NULL,&nloc);
1589: if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
1590: PetscInt sum;
1592: if (n == PETSC_DECIDE) {
1593: PetscSplitOwnership(comm,&n,&N);
1594: }
1595: /* Check sum(n) = N */
1596: MPIU_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);
1597: if (sum != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Sum of local columns %D != global columns %D",sum,N);
1599: MatCreateDense(comm,m,n,PETSC_DETERMINE,N,NULL,outmat);
1600: }
1602: /* numeric phase */
1603: mat = (Mat_MPIDense*)(*outmat)->data;
1604: MatCopy(inmat,mat->A,SAME_NONZERO_PATTERN);
1605: MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);
1606: MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);
1607: return(0);
1608: }
1610: #if defined(PETSC_HAVE_CUDA)
1611: PetscErrorCode MatConvert_MPIDenseCUDA_MPIDense(Mat M,MatType type,MatReuse reuse,Mat *newmat)
1612: {
1613: Mat B;
1614: Mat_MPIDense *m;
1618: if (reuse == MAT_INITIAL_MATRIX) {
1619: MatDuplicate(M,MAT_COPY_VALUES,newmat);
1620: } else if (reuse == MAT_REUSE_MATRIX) {
1621: MatCopy(M,*newmat,SAME_NONZERO_PATTERN);
1622: }
1624: B = *newmat;
1625: MatBindToCPU_MPIDenseCUDA(B,PETSC_TRUE);
1626: PetscFree(B->defaultvectype);
1627: PetscStrallocpy(VECSTANDARD,&B->defaultvectype);
1628: PetscObjectChangeTypeName((PetscObject)B,MATMPIDENSE);
1629: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpidensecuda_mpidense_C",NULL);
1630: PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_mpiaij_mpidensecuda_C",NULL);
1631: PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_mpiaijcusparse_mpidensecuda_C",NULL);
1632: PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_mpidensecuda_mpiaij_C",NULL);
1633: PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_mpidensecuda_mpiaijcusparse_C",NULL);
1634: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArray_C",NULL);
1635: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayRead_C",NULL);
1636: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayWrite_C",NULL);
1637: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArray_C",NULL);
1638: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayRead_C",NULL);
1639: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayWrite_C",NULL);
1640: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAPlaceArray_C",NULL);
1641: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAResetArray_C",NULL);
1642: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAReplaceArray_C",NULL);
1643: m = (Mat_MPIDense*)(B)->data;
1644: if (m->A) {
1645: MatConvert(m->A,MATSEQDENSE,MAT_INPLACE_MATRIX,&m->A);
1646: MatSetUpMultiply_MPIDense(B);
1647: }
1648: B->ops->bindtocpu = NULL;
1649: B->offloadmask = PETSC_OFFLOAD_CPU;
1650: return(0);
1651: }
1653: PetscErrorCode MatConvert_MPIDense_MPIDenseCUDA(Mat M,MatType type,MatReuse reuse,Mat *newmat)
1654: {
1655: Mat B;
1656: Mat_MPIDense *m;
1660: if (reuse == MAT_INITIAL_MATRIX) {
1661: MatDuplicate(M,MAT_COPY_VALUES,newmat);
1662: } else if (reuse == MAT_REUSE_MATRIX) {
1663: MatCopy(M,*newmat,SAME_NONZERO_PATTERN);
1664: }
1666: B = *newmat;
1667: PetscFree(B->defaultvectype);
1668: PetscStrallocpy(VECCUDA,&B->defaultvectype);
1669: PetscObjectChangeTypeName((PetscObject)B,MATMPIDENSECUDA);
1670: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpidensecuda_mpidense_C", MatConvert_MPIDenseCUDA_MPIDense);
1671: PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_mpiaij_mpidensecuda_C", MatProductSetFromOptions_MPIAIJ_MPIDense);
1672: PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_mpiaijcusparse_mpidensecuda_C",MatProductSetFromOptions_MPIAIJ_MPIDense);
1673: PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_mpidensecuda_mpiaij_C", MatProductSetFromOptions_MPIDense_MPIAIJ);
1674: PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_mpidensecuda_mpiaijcusparse_C",MatProductSetFromOptions_MPIDense_MPIAIJ);
1675: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArray_C", MatDenseCUDAGetArray_MPIDenseCUDA);
1676: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayRead_C", MatDenseCUDAGetArrayRead_MPIDenseCUDA);
1677: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayWrite_C", MatDenseCUDAGetArrayWrite_MPIDenseCUDA);
1678: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArray_C", MatDenseCUDARestoreArray_MPIDenseCUDA);
1679: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayRead_C", MatDenseCUDARestoreArrayRead_MPIDenseCUDA);
1680: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayWrite_C", MatDenseCUDARestoreArrayWrite_MPIDenseCUDA);
1681: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAPlaceArray_C", MatDenseCUDAPlaceArray_MPIDenseCUDA);
1682: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAResetArray_C", MatDenseCUDAResetArray_MPIDenseCUDA);
1683: PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAReplaceArray_C", MatDenseCUDAReplaceArray_MPIDenseCUDA);
1684: m = (Mat_MPIDense*)(B)->data;
1685: if (m->A) {
1686: MatConvert(m->A,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&m->A);
1687: MatSetUpMultiply_MPIDense(B);
1688: B->offloadmask = PETSC_OFFLOAD_BOTH;
1689: } else {
1690: B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
1691: }
1692: MatBindToCPU_MPIDenseCUDA(B,PETSC_FALSE);
1694: B->ops->bindtocpu = MatBindToCPU_MPIDenseCUDA;
1695: return(0);
1696: }
1697: #endif
1699: PetscErrorCode MatDenseGetColumnVec_MPIDense(Mat A,PetscInt col,Vec *v)
1700: {
1701: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1703: PetscInt lda;
1706: if (a->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
1707: if (a->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
1708: if (!a->cvec) {
1709: VecCreateMPIWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,A->rmap->N,NULL,&a->cvec);
1710: }
1711: a->vecinuse = col + 1;
1712: MatDenseGetLDA(a->A,&lda);
1713: MatDenseGetArray(a->A,(PetscScalar**)&a->ptrinuse);
1714: VecPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)lda);
1715: *v = a->cvec;
1716: return(0);
1717: }
1719: PetscErrorCode MatDenseRestoreColumnVec_MPIDense(Mat A,PetscInt col,Vec *v)
1720: {
1721: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1725: if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first");
1726: if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector");
1727: a->vecinuse = 0;
1728: MatDenseRestoreArray(a->A,(PetscScalar**)&a->ptrinuse);
1729: VecResetArray(a->cvec);
1730: *v = NULL;
1731: return(0);
1732: }
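/*
   Editorial usage sketch: operating on a single column of an assembled MATMPIDENSE
   matrix X through the column-vector accessors implemented above.  X and the column
   index col are assumptions of this sketch; only one column vector may be
   outstanding at a time, hence the mandatory restore.

     Vec v;
     MatDenseGetColumnVec(X,col,&v);
     VecScale(v,2.0);
     MatDenseRestoreColumnVec(X,col,&v);
*/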
1734: PetscErrorCode MatDenseGetColumnVecRead_MPIDense(Mat A,PetscInt col,Vec *v)
1735: {
1736: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1738: PetscInt lda;
1741: if (a->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
1742: if (a->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
1743: if (!a->cvec) {
1744: VecCreateMPIWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,A->rmap->N,NULL,&a->cvec);
1745: }
1746: a->vecinuse = col + 1;
1747: MatDenseGetLDA(a->A,&lda);
1748: MatDenseGetArrayRead(a->A,&a->ptrinuse);
1749: VecPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)lda);
1750: VecLockReadPush(a->cvec);
1751: *v = a->cvec;
1752: return(0);
1753: }
1755: PetscErrorCode MatDenseRestoreColumnVecRead_MPIDense(Mat A,PetscInt col,Vec *v)
1756: {
1757: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1761: if (!a->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVecRead() first");
1762: if (!a->cvec) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing internal column vector");
1763: a->vecinuse = 0;
1764: MatDenseRestoreArrayRead(a->A,&a->ptrinuse);
1765: VecLockReadPop(a->cvec);
1766: VecResetArray(a->cvec);
1767: *v = NULL;
1768: return(0);
1769: }
1771: PetscErrorCode MatDenseGetColumnVecWrite_MPIDense(Mat A,PetscInt col,Vec *v)
1772: {
1773: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1775: PetscInt lda;
1778: if (a->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
1779: if (a->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
1780: if (!a->cvec) {
1781: VecCreateMPIWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,A->rmap->N,NULL,&a->cvec);
1782: }
1783: a->vecinuse = col + 1;
1784: MatDenseGetLDA(a->A,&lda);
1785: MatDenseGetArrayWrite(a->A,(PetscScalar**)&a->ptrinuse);
1786: VecPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)lda);
1787: *v = a->cvec;
1788: return(0);
1789: }
1791: PetscErrorCode MatDenseRestoreColumnVecWrite_MPIDense(Mat A,PetscInt col,Vec *v)
1792: {
1793: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1797: if (!a->vecinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVecWrite() first");
1798: if (!a->cvec) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing internal column vector");
1799: a->vecinuse = 0;
1800: MatDenseRestoreArrayWrite(a->A,(PetscScalar**)&a->ptrinuse);
1801: VecResetArray(a->cvec);
1802: *v = NULL;
1803: return(0);
1804: }
1806: PetscErrorCode MatDenseGetSubMatrix_MPIDense(Mat A,PetscInt cbegin,PetscInt cend,Mat *v)
1807: {
1808: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1809: Mat_MPIDense *c;
1811: MPI_Comm comm;
1812: PetscBool setup = PETSC_FALSE;
1815: PetscObjectGetComm((PetscObject)A,&comm);
1816: if (a->vecinuse) SETERRQ(comm,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
1817: if (a->matinuse) SETERRQ(comm,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
1818: if (!a->cmat) {
1819: setup = PETSC_TRUE;
1820: MatCreate(comm,&a->cmat);
1821: PetscLogObjectParent((PetscObject)A,(PetscObject)a->cmat);
1822: MatSetType(a->cmat,((PetscObject)A)->type_name);
1823: PetscLayoutReference(A->rmap,&a->cmat->rmap);
1824: PetscLayoutSetSize(a->cmat->cmap,cend-cbegin);
1825: PetscLayoutSetUp(a->cmat->cmap);
1826: } else if (cend-cbegin != a->cmat->cmap->N) {
1827: setup = PETSC_TRUE;
1828: PetscLayoutDestroy(&a->cmat->cmap);
1829: PetscLayoutCreate(comm,&a->cmat->cmap);
1830: PetscLayoutSetSize(a->cmat->cmap,cend-cbegin);
1831: PetscLayoutSetUp(a->cmat->cmap);
1832: }
1833: c = (Mat_MPIDense*)a->cmat->data;
1834: if (c->A) SETERRQ(comm,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
1835: MatDenseGetSubMatrix(a->A,cbegin,cend,&c->A);
1836: if (setup) { /* do we really need this? */
1837: MatSetUpMultiply_MPIDense(a->cmat);
1838: }
1839: a->cmat->preallocated = PETSC_TRUE;
1840: a->cmat->assembled = PETSC_TRUE;
1841: a->matinuse = cbegin + 1;
1842: *v = a->cmat;
1843: return(0);
1844: }
1846: PetscErrorCode MatDenseRestoreSubMatrix_MPIDense(Mat A,Mat *v)
1847: {
1848: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1849: Mat_MPIDense *c;
1853: if (!a->matinuse) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ORDER,"Need to call MatDenseGetSubMatrix() first");
1854: if (!a->cmat) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing internal matrix");
1855: if (*v != a->cmat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Not the matrix obtained from MatDenseGetSubMatrix()");
1856: a->matinuse = 0;
1857: c = (Mat_MPIDense*)a->cmat->data;
1858: MatDenseRestoreSubMatrix(a->A,&c->A);
1859: *v = NULL;
1860: return(0);
1861: }
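/*
   Editorial usage sketch: viewing the contiguous column range [cbegin,cend) of an
   assembled MATMPIDENSE matrix X as a matrix of its own via MatDenseGetSubMatrix().
   The submatrix shares storage with X, so it must be restored before X is used
   through any other part of the interface; cbegin and cend are assumptions of this
   sketch.

     Mat Xsub;
     MatDenseGetSubMatrix(X,cbegin,cend,&Xsub);
     MatScale(Xsub,0.5);
     MatDenseRestoreSubMatrix(X,&Xsub);
*/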
1863: PETSC_EXTERN PetscErrorCode MatCreate_MPIDense(Mat mat)
1864: {
1865: Mat_MPIDense *a;
1869: PetscNewLog(mat,&a);
1870: mat->data = (void*)a;
1871: PetscMemcpy(mat->ops,&MatOps_Values,sizeof(struct _MatOps));
1873: mat->insertmode = NOT_SET_VALUES;
1875: /* build cache for off array entries formed */
1876: a->donotstash = PETSC_FALSE;
1878: MatStashCreate_Private(PetscObjectComm((PetscObject)mat),1,&mat->stash);
1880: /* stuff used for matrix vector multiply */
1881: a->lvec = NULL;
1882: a->Mvctx = NULL;
1883: a->roworiented = PETSC_TRUE;
1885: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetLDA_C",MatDenseGetLDA_MPIDense);
1886: PetscObjectComposeFunction((PetscObject)mat,"MatDenseSetLDA_C",MatDenseSetLDA_MPIDense);
1887: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetArray_C",MatDenseGetArray_MPIDense);
1888: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreArray_C",MatDenseRestoreArray_MPIDense);
1889: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetArrayRead_C",MatDenseGetArrayRead_MPIDense);
1890: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreArrayRead_C",MatDenseRestoreArrayRead_MPIDense);
1891: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetArrayWrite_C",MatDenseGetArrayWrite_MPIDense);
1892: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreArrayWrite_C",MatDenseRestoreArrayWrite_MPIDense);
1893: PetscObjectComposeFunction((PetscObject)mat,"MatDensePlaceArray_C",MatDensePlaceArray_MPIDense);
1894: PetscObjectComposeFunction((PetscObject)mat,"MatDenseResetArray_C",MatDenseResetArray_MPIDense);
1895: PetscObjectComposeFunction((PetscObject)mat,"MatDenseReplaceArray_C",MatDenseReplaceArray_MPIDense);
1896: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_MPIDense);
1897: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_MPIDense);
1898: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_MPIDense);
1899: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_MPIDense);
1900: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_MPIDense);
1901: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_MPIDense);
1902: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetSubMatrix_C",MatDenseGetSubMatrix_MPIDense);
1903: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreSubMatrix_C",MatDenseRestoreSubMatrix_MPIDense);
1904: #if defined(PETSC_HAVE_ELEMENTAL)
1905: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpidense_elemental_C",MatConvert_MPIDense_Elemental);
1906: #endif
1907: #if defined(PETSC_HAVE_SCALAPACK)
1908: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpidense_scalapack_C",MatConvert_Dense_ScaLAPACK);
1909: #endif
1910: #if defined(PETSC_HAVE_CUDA)
1911: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpidense_mpidensecuda_C",MatConvert_MPIDense_MPIDenseCUDA);
1912: #endif
1913: PetscObjectComposeFunction((PetscObject)mat,"MatMPIDenseSetPreallocation_C",MatMPIDenseSetPreallocation_MPIDense);
1914: PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_mpiaij_mpidense_C",MatProductSetFromOptions_MPIAIJ_MPIDense);
1915: PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_mpidense_mpiaij_C",MatProductSetFromOptions_MPIDense_MPIAIJ);
1916: #if defined(PETSC_HAVE_CUDA)
1917: PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_mpiaijcusparse_mpidense_C",MatProductSetFromOptions_MPIAIJ_MPIDense);
1918: PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_mpidense_mpiaijcusparse_C",MatProductSetFromOptions_MPIDense_MPIAIJ);
1919: #endif
1921: PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetColumn_C",MatDenseGetColumn_MPIDense);
1922: PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreColumn_C",MatDenseRestoreColumn_MPIDense);
1923: PetscObjectChangeTypeName((PetscObject)mat,MATMPIDENSE);
1924: return(0);
1925: }
1927: /*MC
1928: MATMPIDENSECUDA - MATMPIDENSECUDA = "mpidensecuda" - A matrix type to be used for distributed dense matrices on GPUs.
1930: Options Database Keys:
1931: . -mat_type mpidensecuda - sets the matrix type to "mpidensecuda" during a call to MatSetFromOptions()
1933: Level: beginner
1935: .seealso: MATDENSECUDA,MATMPIDENSE,MATSEQDENSECUDA
1937: M*/
1938: #if defined(PETSC_HAVE_CUDA)
1939: PETSC_EXTERN PetscErrorCode MatCreate_MPIDenseCUDA(Mat B)
1940: {
1944: PetscCUDAInitializeCheck();
1945: MatCreate_MPIDense(B);
1946: MatConvert_MPIDense_MPIDenseCUDA(B,MATMPIDENSECUDA,MAT_INPLACE_MATRIX,&B);
1947: return(0);
1948: }
1949: #endif
1951: /*MC
1952: MATDENSE - MATDENSE = "dense" - A matrix type to be used for dense matrices.
1954: This matrix type is identical to MATSEQDENSE when constructed with a single process communicator,
1955: and MATMPIDENSE otherwise.
1957: Options Database Keys:
1958: . -mat_type dense - sets the matrix type to "dense" during a call to MatSetFromOptions()
1960: Level: beginner
1963: .seealso: MATSEQDENSE,MATMPIDENSE,MATDENSECUDA
1964: M*/
1966: /*MC
1967: MATDENSECUDA - MATDENSECUDA = "densecuda" - A matrix type to be used for dense matrices on GPUs.
1969: This matrix type is identical to MATSEQDENSECUDA when constructed with a single process communicator,
1970: and MATMPIDENSECUDA otherwise.
1972: Options Database Keys:
1973: . -mat_type densecuda - sets the matrix type to "densecuda" during a call to MatSetFromOptions()
1975: Level: beginner
1977: .seealso: MATSEQDENSECUDA,MATMPIDENSECUDA,MATDENSE
1978: M*/
1980: /*@C
1981: MatMPIDenseSetPreallocation - Sets the array used to store the matrix entries
1983: Collective
1985: Input Parameters:
1986: + B - the matrix
1987: - data - optional location of matrix data. Set data=NULL for PETSc
1988: to control all matrix memory allocation.
1990: Notes:
1991: The dense format is fully compatible with standard Fortran 77
1992: storage by columns.
1994: The data input variable is intended primarily for Fortran programmers
1995: who wish to allocate their own matrix memory space. Most users should
1996: set data=NULL.
1998: Level: intermediate
2000: .seealso: MatCreate(), MatCreateSeqDense(), MatSetValues()
2001: @*/
2002: PetscErrorCode MatMPIDenseSetPreallocation(Mat B,PetscScalar *data)
2003: {
2008: PetscTryMethod(B,"MatMPIDenseSetPreallocation_C",(Mat,PetscScalar*),(B,data));
2009: return(0);
2010: }
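/*
   Editorial usage sketch: the long-form construction of a parallel dense matrix,
   equivalent to what MatCreateDense() below does on more than one process.  Passing
   data=NULL lets PETSc allocate the column-major storage; the global sizes 100 and
   50 are illustrative.

     Mat B;
     MatCreate(PETSC_COMM_WORLD,&B);
     MatSetSizes(B,PETSC_DECIDE,PETSC_DECIDE,100,50);
     MatSetType(B,MATMPIDENSE);
     MatMPIDenseSetPreallocation(B,NULL);
*/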
2012: /*@
2013: MatDensePlaceArray - Allows one to replace the array in a dense matrix with an
2014: array provided by the user. This is useful to avoid copying an array
2015: into a matrix
2017: Not Collective
2019: Input Parameters:
2020: + mat - the matrix
2021: - array - the array in column major order
2023: Notes:
2024: You can return to the original array with a call to MatDenseResetArray(). The user is responsible for freeing this array; it will not be
2025: freed when the matrix is destroyed.
2027: Level: developer
2029: .seealso: MatDenseGetArray(), MatDenseResetArray(), VecPlaceArray(), VecGetArray(), VecRestoreArray(), VecReplaceArray(), VecResetArray()
2031: @*/
2032: PetscErrorCode MatDensePlaceArray(Mat mat,const PetscScalar *array)
2033: {
2038: PetscUseMethod(mat,"MatDensePlaceArray_C",(Mat,const PetscScalar*),(mat,array));
2039: PetscObjectStateIncrease((PetscObject)mat);
2040: #if defined(PETSC_HAVE_CUDA)
2041: mat->offloadmask = PETSC_OFFLOAD_CPU;
2042: #endif
2043: return(0);
2044: }
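/*
   Editorial usage sketch (not part of the library source): temporarily backing an
   assembled MATMPIDENSE matrix A with user-owned storage.  The array myvals is an
   assumption of this sketch; it must be column major with at least lda*(local
   columns) entries and is not freed by PETSc.  MatDenseResetArray(), documented
   next, restores the original storage.

     PetscReal nrm;
     MatDensePlaceArray(A,myvals);
     MatNorm(A,NORM_FROBENIUS,&nrm);
     MatDenseResetArray(A);
*/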
2046: /*@
2047: MatDenseResetArray - Resets the matrix array to the array it previously had before the call to MatDensePlaceArray()
2049: Not Collective
2051: Input Parameters:
2052: . mat - the matrix
2054: Notes:
2055: You can only call this after a call to MatDensePlaceArray()
2057: Level: developer
2059: .seealso: MatDenseGetArray(), MatDensePlaceArray(), VecPlaceArray(), VecGetArray(), VecRestoreArray(), VecReplaceArray(), VecResetArray()
2061: @*/
2062: PetscErrorCode MatDenseResetArray(Mat mat)
2063: {
2068: PetscUseMethod(mat,"MatDenseResetArray_C",(Mat),(mat));
2069: PetscObjectStateIncrease((PetscObject)mat);
2070: return(0);
2071: }
2073: /*@
2074: MatDenseReplaceArray - Allows one to replace the array in a dense matrix with an
2075: array provided by the user. This is useful to avoid copying an array
2076: into a matrix
2078: Not Collective
2080: Input Parameters:
2081: + mat - the matrix
2082: - array - the array in column major order
2084: Notes:
2085: The memory passed in MUST be obtained with PetscMalloc() and CANNOT be
2086: freed by the user. It will be freed when the matrix is destroyed.
2088: Level: developer
2090: .seealso: MatDenseGetArray(), VecReplaceArray()
2091: @*/
2092: PetscErrorCode MatDenseReplaceArray(Mat mat,const PetscScalar *array)
2093: {
2098: PetscUseMethod(mat,"MatDenseReplaceArray_C",(Mat,const PetscScalar*),(mat,array));
2099: PetscObjectStateIncrease((PetscObject)mat);
2100: #if defined(PETSC_HAVE_CUDA)
2101: mat->offloadmask = PETSC_OFFLOAD_CPU;
2102: #endif
2103: return(0);
2104: }
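/*
   Editorial usage sketch: permanently handing a PetscMalloc1()-obtained array over
   to an assembled MATMPIDENSE matrix A.  Ownership transfers to A, which frees the
   array when it is destroyed; m and n stand for the local row and column counts and
   are assumptions of this sketch.

     PetscScalar *vals;
     PetscMalloc1((size_t)m*n,&vals);
     ... fill vals in column-major order ...
     MatDenseReplaceArray(A,vals);
*/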
2106: #if defined(PETSC_HAVE_CUDA)
2107: /*@C
2108: MatDenseCUDAPlaceArray - Allows one to replace the GPU array in a dense matrix with an
2109: array provided by the user. This is useful to avoid copying an array
2110: into a matrix
2112: Not Collective
2114: Input Parameters:
2115: + mat - the matrix
2116: - array - the array in column major order
2118: Notes:
2119: You can return to the original array with a call to MatDenseCUDAResetArray(). The user is responsible for freeing this array; it will not be
2120: freed when the matrix is destroyed. The array must have been allocated with cudaMalloc().
2122: Level: developer
2124: .seealso: MatDenseCUDAGetArray(), MatDenseCUDAResetArray()
2125: @*/
2126: PetscErrorCode MatDenseCUDAPlaceArray(Mat mat,const PetscScalar *array)
2127: {
2132: PetscUseMethod(mat,"MatDenseCUDAPlaceArray_C",(Mat,const PetscScalar*),(mat,array));
2133: PetscObjectStateIncrease((PetscObject)mat);
2134: mat->offloadmask = PETSC_OFFLOAD_GPU;
2135: return(0);
2136: }
2138: /*@C
2139: MatDenseCUDAResetArray - Resets the matrix array to the GPU array it previously had before the call to MatDenseCUDAPlaceArray()
2141: Not Collective
2143: Input Parameters:
2144: . mat - the matrix
2146: Notes:
2147: You can only call this after a call to MatDenseCUDAPlaceArray()
2149: Level: developer
2151: .seealso: MatDenseCUDAGetArray(), MatDenseCUDAPlaceArray()
2153: @*/
2154: PetscErrorCode MatDenseCUDAResetArray(Mat mat)
2155: {
2160: PetscUseMethod(mat,"MatDenseCUDAResetArray_C",(Mat),(mat));
2161: PetscObjectStateIncrease((PetscObject)mat);
2162: return(0);
2163: }
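/*
   Editorial usage sketch: temporarily backing an assembled MATMPIDENSECUDA matrix A
   with a user-owned device buffer.  The pointer d_vals is an assumption of this
   sketch; it must have been obtained with cudaMalloc(), hold at least lda*(local
   columns) scalars in column major order, and remains owned by the user.

     PetscReal nrm;
     MatDenseCUDAPlaceArray(A,d_vals);
     MatNorm(A,NORM_FROBENIUS,&nrm);
     MatDenseCUDAResetArray(A);
*/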
2165: /*@C
2166: MatDenseCUDAReplaceArray - Allows one to replace the GPU array in a dense matrix with an
2167: array provided by the user. This is useful to avoid copying an array
2168: into a matrix
2170: Not Collective
2172: Input Parameters:
2173: + mat - the matrix
2174: - array - the array in column major order
2176: Notes:
2177: This permanently replaces the GPU array and frees the memory associated with the old GPU array.
2178: The memory passed in CANNOT be freed by the user. It will be freed
2179: when the matrix is destroyed. The array should respect the matrix leading dimension.
2181: Level: developer
2183: .seealso: MatDenseCUDAGetArray(), MatDenseCUDAPlaceArray(), MatDenseCUDAResetArray()
2184: @*/
2185: PetscErrorCode MatDenseCUDAReplaceArray(Mat mat,const PetscScalar *array)
2186: {
2191: PetscUseMethod(mat,"MatDenseCUDAReplaceArray_C",(Mat,const PetscScalar*),(mat,array));
2192: PetscObjectStateIncrease((PetscObject)mat);
2193: mat->offloadmask = PETSC_OFFLOAD_GPU;
2194: return(0);
2195: }
2197: /*@C
2198: MatDenseCUDAGetArrayWrite - Provides write access to the CUDA buffer inside a dense matrix.
2200: Not Collective
2202: Input Parameters:
2203: . A - the matrix
2205: Output Parameter:
2206: . array - the GPU array in column major order
2208: Notes:
2209: The data on the GPU may not be updated due to operations done on the CPU. If you need updated data, use MatDenseCUDAGetArray(). The array must be restored with MatDenseCUDARestoreArrayWrite() when no longer needed.
2211: Level: developer
2213: .seealso: MatDenseCUDAGetArray(), MatDenseCUDARestoreArray(), MatDenseCUDARestoreArrayWrite(), MatDenseCUDAGetArrayRead(), MatDenseCUDARestoreArrayRead()
2214: @*/
2215: PetscErrorCode MatDenseCUDAGetArrayWrite(Mat A, PetscScalar **a)
2216: {
2221: PetscUseMethod(A,"MatDenseCUDAGetArrayWrite_C",(Mat,PetscScalar**),(A,a));
2222: PetscObjectStateIncrease((PetscObject)A);
2223: return(0);
2224: }
2226: /*@C
2227: MatDenseCUDARestoreArrayWrite - Restore write access to the CUDA buffer inside a dense matrix previously obtained with MatDenseCUDAGetArrayWrite().
2229: Not Collective
2231: Input Parameters:
2232: + A - the matrix
2233: - array - the GPU array in column major order
2237: Level: developer
2239: .seealso: MatDenseCUDAGetArray(), MatDenseCUDARestoreArray(), MatDenseCUDAGetArrayWrite(), MatDenseCUDARestoreArrayRead(), MatDenseCUDAGetArrayRead()
2240: @*/
2241: PetscErrorCode MatDenseCUDARestoreArrayWrite(Mat A, PetscScalar **a)
2242: {
2247: PetscUseMethod(A,"MatDenseCUDARestoreArrayWrite_C",(Mat,PetscScalar**),(A,a));
2248: PetscObjectStateIncrease((PetscObject)A);
2249: A->offloadmask = PETSC_OFFLOAD_GPU;
2250: return(0);
2251: }
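/*
   Editorial usage sketch: overwriting the local device block of an assembled
   MATMPIDENSECUDA matrix A through the write-only accessor documented above, here
   simply zeroing it with cudaMemset().

     PetscScalar *d_a;
     PetscInt    lda,n;
     MatDenseGetLDA(A,&lda);
     MatGetLocalSize(A,NULL,&n);
     MatDenseCUDAGetArrayWrite(A,&d_a);
     cudaMemset(d_a,0,(size_t)lda*(size_t)n*sizeof(PetscScalar));
     MatDenseCUDARestoreArrayWrite(A,&d_a);
*/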
2253: /*@C
2254: MatDenseCUDAGetArrayRead - Provides read-only access to the CUDA buffer inside a dense matrix. The array must be restored with MatDenseCUDARestoreArrayRead() when no longer needed.
2256: Not Collective
2258: Input Parameters:
2259: . A - the matrix
2261: Output Parameter:
2262: . array - the GPU array in column major order
2264: Notes:
2265: Data can be copied to the GPU due to operations done on the CPU. If you need write only access, use MatDenseCUDAGetArrayWrite().
2267: Level: developer
2269: .seealso: MatDenseCUDAGetArray(), MatDenseCUDARestoreArray(), MatDenseCUDARestoreArrayWrite(), MatDenseCUDAGetArrayWrite(), MatDenseCUDARestoreArrayRead()
2270: @*/
2271: PetscErrorCode MatDenseCUDAGetArrayRead(Mat A, const PetscScalar **a)
2272: {
2277: PetscUseMethod(A,"MatDenseCUDAGetArrayRead_C",(Mat,const PetscScalar**),(A,a));
2278: return(0);
2279: }
2281: /*@C
2282: MatDenseCUDARestoreArrayRead - Restore read-only access to the CUDA buffer inside a dense matrix previously obtained with a call to MatDenseCUDAGetArrayRead().
2284: Not Collective
2286: Input Parameters:
2287: + A - the matrix
2288: - array - the GPU array in column major order
2290: Notes:
2291: The array obtained with MatDenseCUDAGetArrayRead() must no longer be accessed after this call.
2293: Level: developer
2295: .seealso: MatDenseCUDAGetArray(), MatDenseCUDARestoreArray(), MatDenseCUDARestoreArrayWrite(), MatDenseCUDAGetArrayWrite(), MatDenseCUDAGetArrayRead()
2296: @*/
2297: PetscErrorCode MatDenseCUDARestoreArrayRead(Mat A, const PetscScalar **a)
2298: {
2302: PetscUseMethod(A,"MatDenseCUDARestoreArrayRead_C",(Mat,const PetscScalar**),(A,a));
2303: return(0);
2304: }
2306: /*@C
2307: MatDenseCUDAGetArray - Provides access to the CUDA buffer inside a dense matrix. The array must be restored with MatDenseCUDARestoreArray() when no longer needed.
2309: Not Collective
2311: Input Parameters:
2312: . A - the matrix
2314: Output Parameter:
2315: . array - the GPU array in column major order
2317: Notes:
2318: Data can be copied to the GPU due to operations done on the CPU. If you need write only access, use MatDenseCUDAGetArrayWrite(). For read-only access, use MatDenseCUDAGetArrayRead().
2320: Level: developer
2322: .seealso: MatDenseCUDAGetArrayRead(), MatDenseCUDARestoreArray(), MatDenseCUDARestoreArrayWrite(), MatDenseCUDAGetArrayWrite(), MatDenseCUDARestoreArrayRead()
2323: @*/
2324: PetscErrorCode MatDenseCUDAGetArray(Mat A, PetscScalar **a)
2325: {
2330: PetscUseMethod(A,"MatDenseCUDAGetArray_C",(Mat,PetscScalar**),(A,a));
2331: PetscObjectStateIncrease((PetscObject)A);
2332: return(0);
2333: }
2335: /*@C
2336: MatDenseCUDARestoreArray - Restore access to the CUDA buffer inside a dense matrix previously obtained with MatDenseCUDAGetArray().
2338: Not Collective
2340: Input Parameters:
2341: + A - the matrix
2342: - array - the GPU array in column major order
2346: Level: developer
2348: .seealso: MatDenseCUDAGetArray(), MatDenseCUDARestoreArrayWrite(), MatDenseCUDAGetArrayWrite(), MatDenseCUDARestoreArrayRead(), MatDenseCUDAGetArrayRead()
2349: @*/
2350: PetscErrorCode MatDenseCUDARestoreArray(Mat A, PetscScalar **a)
2351: {
2356: PetscUseMethod(A,"MatDenseCUDARestoreArray_C",(Mat,PetscScalar**),(A,a));
2357: PetscObjectStateIncrease((PetscObject)A);
2358: A->offloadmask = PETSC_OFFLOAD_GPU;
2359: return(0);
2360: }
2361: #endif
2363: /*@C
2364: MatCreateDense - Creates a matrix in dense format.
2366: Collective
2368: Input Parameters:
2369: + comm - MPI communicator
2370: . m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
2371: . n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
2372: . M - number of global rows (or PETSC_DECIDE to have it calculated if m is given)
2373: . N - number of global columns (or PETSC_DECIDE to have it calculated if n is given)
2374: - data - optional location of matrix data. Set data=NULL (PETSC_NULL_SCALAR for Fortran users) for PETSc
2375: to control all matrix memory allocation.
2377: Output Parameter:
2378: . A - the matrix
2380: Notes:
2381: The dense format is fully compatible with standard Fortran 77
2382: storage by columns.
2384: The data input variable is intended primarily for Fortran programmers
2385: who wish to allocate their own matrix memory space. Most users should
2386: set data=NULL (PETSC_NULL_SCALAR for Fortran users).
2388: The user MUST specify either the local or global matrix dimensions
2389: (possibly both).
2391: Level: intermediate
2393: .seealso: MatCreate(), MatCreateSeqDense(), MatSetValues()
2394: @*/
2395: PetscErrorCode MatCreateDense(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscScalar *data,Mat *A)
2396: {
2398: PetscMPIInt size;
2401: MatCreate(comm,A);
2402: MatSetSizes(*A,m,n,M,N);
2403: MPI_Comm_size(comm,&size);
2404: if (size > 1) {
2405: PetscBool havedata = (PetscBool)!!data;
2407: MatSetType(*A,MATMPIDENSE);
2408: MatMPIDenseSetPreallocation(*A,data);
2409: MPIU_Allreduce(MPI_IN_PLACE,&havedata,1,MPIU_BOOL,MPI_LOR,comm);
2410: if (havedata) { /* user provided data array, so no need to assemble */
2411: MatSetUpMultiply_MPIDense(*A);
2412: (*A)->assembled = PETSC_TRUE;
2413: }
2414: } else {
2415: MatSetType(*A,MATSEQDENSE);
2416: MatSeqDenseSetPreallocation(*A,data);
2417: }
2418: return(0);
2419: }
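/*
   Editorial usage sketch: the common case of letting PETSc size and allocate a
   distributed dense matrix.  On one process this produces a MATSEQDENSE matrix and
   on several a MATMPIDENSE matrix, as described above; the global sizes and the use
   of MatSetRandom() are illustrative.

     Mat A;
     MatCreateDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,100,50,NULL,&A);
     MatSetRandom(A,NULL);
     MatDestroy(&A);
*/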
2421: #if defined(PETSC_HAVE_CUDA)
2422: /*@C
2423: MatCreateDenseCUDA - Creates a matrix in dense format using CUDA.
2425: Collective
2427: Input Parameters:
2428: + comm - MPI communicator
2429: . m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
2430: . n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
2431: . M - number of global rows (or PETSC_DECIDE to have it calculated if m is given)
2432: . N - number of global columns (or PETSC_DECIDE to have it calculated if n is given)
2433: - data - optional location of GPU matrix data. Set data=NULL for PETSc
2434: to control matrix memory allocation.
2436: Output Parameter:
2437: . A - the matrix
2441: Level: intermediate
2443: .seealso: MatCreate(), MatCreateDense()
2444: @*/
2445: PetscErrorCode MatCreateDenseCUDA(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscScalar *data,Mat *A)
2446: {
2448: PetscMPIInt size;
2451: MatCreate(comm,A);
2453: MatSetSizes(*A,m,n,M,N);
2454: MPI_Comm_size(comm,&size);
2455: if (size > 1) {
2456: MatSetType(*A,MATMPIDENSECUDA);
2457: MatMPIDenseCUDASetPreallocation(*A,data);
2458: if (data) { /* user provided data array, so no need to assemble */
2459: MatSetUpMultiply_MPIDense(*A);
2460: (*A)->assembled = PETSC_TRUE;
2461: }
2462: } else {
2463: MatSetType(*A,MATSEQDENSECUDA);
2464: MatSeqDenseCUDASetPreallocation(*A,data);
2465: }
2466: return(0);
2467: }
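/*
   Editorial usage sketch: creating a CUDA dense matrix with PETSc-managed device
   storage, inserting one entry from the host, and assembling.  The global sizes are
   illustrative.

     Mat A;
     MatCreateDenseCUDA(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,100,50,NULL,&A);
     MatSetValue(A,0,0,1.0,INSERT_VALUES);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
     MatDestroy(&A);
*/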
2468: #endif
2470: static PetscErrorCode MatDuplicate_MPIDense(Mat A,MatDuplicateOption cpvalues,Mat *newmat)
2471: {
2472: Mat mat;
2473: Mat_MPIDense *a,*oldmat = (Mat_MPIDense*)A->data;
2477: *newmat = NULL;
2478: MatCreate(PetscObjectComm((PetscObject)A),&mat);
2479: MatSetSizes(mat,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);
2480: MatSetType(mat,((PetscObject)A)->type_name);
2481: a = (Mat_MPIDense*)mat->data;
2483: mat->factortype = A->factortype;
2484: mat->assembled = PETSC_TRUE;
2485: mat->preallocated = PETSC_TRUE;
2487: mat->insertmode = NOT_SET_VALUES;
2488: a->donotstash = oldmat->donotstash;
2490: PetscLayoutReference(A->rmap,&mat->rmap);
2491: PetscLayoutReference(A->cmap,&mat->cmap);
2493: MatDuplicate(oldmat->A,cpvalues,&a->A);
2494: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
2495: MatSetUpMultiply_MPIDense(mat);
2497: *newmat = mat;
2498: return(0);
2499: }
2501: PetscErrorCode MatLoad_MPIDense(Mat newMat, PetscViewer viewer)
2502: {
2504: PetscBool isbinary;
2505: #if defined(PETSC_HAVE_HDF5)
2506: PetscBool ishdf5;
2507: #endif
2512: /* force binary viewer to load .info file if it has not yet done so */
2513: PetscViewerSetUp(viewer);
2514: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
2515: #if defined(PETSC_HAVE_HDF5)
2516: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERHDF5, &ishdf5);
2517: #endif
2518: if (isbinary) {
2519: MatLoad_Dense_Binary(newMat,viewer);
2520: #if defined(PETSC_HAVE_HDF5)
2521: } else if (ishdf5) {
2522: MatLoad_Dense_HDF5(newMat,viewer);
2523: #endif
2524: } else SETERRQ2(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"Viewer type %s not yet supported for reading %s matrices",((PetscObject)viewer)->type_name,((PetscObject)newMat)->type_name);
2525: return(0);
2526: }
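/*
   Editorial usage sketch: loading a previously saved dense matrix from a PETSc
   binary file, the path "matrix.dat" being an assumption of this sketch.  The same
   pattern works with an HDF5 viewer when PETSc is configured with HDF5.

     Mat A;
     PetscViewer viewer;
     PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetType(A,MATMPIDENSE);
     MatLoad(A,viewer);
     PetscViewerDestroy(&viewer);
*/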
2528: static PetscErrorCode MatEqual_MPIDense(Mat A,Mat B,PetscBool *flag)
2529: {
2530: Mat_MPIDense *matB = (Mat_MPIDense*)B->data,*matA = (Mat_MPIDense*)A->data;
2531: Mat a,b;
2532: PetscBool flg;
2536: a = matA->A;
2537: b = matB->A;
2538: MatEqual(a,b,&flg);
2539: MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
2540: return(0);
2541: }
2543: PetscErrorCode MatDestroy_MatTransMatMult_MPIDense_MPIDense(void *data)
2544: {
2545: PetscErrorCode ierr;
2546: Mat_TransMatMultDense *atb = (Mat_TransMatMultDense *)data;
2549: PetscFree2(atb->sendbuf,atb->recvcounts);
2550: MatDestroy(&atb->atb);
2551: PetscFree(atb);
2552: return(0);
2553: }
2555: PetscErrorCode MatDestroy_MatMatTransMult_MPIDense_MPIDense(void *data)
2556: {
2557: PetscErrorCode ierr;
2558: Mat_MatTransMultDense *abt = (Mat_MatTransMultDense *)data;
2561: PetscFree2(abt->buf[0],abt->buf[1]);
2562: PetscFree2(abt->recvcounts,abt->recvdispls);
2563: PetscFree(abt);
2564: return(0);
2565: }
2567: static PetscErrorCode MatTransposeMatMultNumeric_MPIDense_MPIDense(Mat A,Mat B,Mat C)
2568: {
2569: Mat_MPIDense *a=(Mat_MPIDense*)A->data, *b=(Mat_MPIDense*)B->data, *c=(Mat_MPIDense*)C->data;
2570: Mat_TransMatMultDense *atb;
2571: PetscErrorCode ierr;
2572: MPI_Comm comm;
2573: PetscMPIInt size,*recvcounts;
2574: PetscScalar *carray,*sendbuf;
2575: const PetscScalar *atbarray;
2576: PetscInt i,cN=C->cmap->N,cM=C->rmap->N,proc,k,j;
2577: const PetscInt *ranges;
2580: MatCheckProduct(C,3);
2581: if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
2582: atb = (Mat_TransMatMultDense *)C->product->data;
2583: recvcounts = atb->recvcounts;
2584: sendbuf = atb->sendbuf;
2586: PetscObjectGetComm((PetscObject)A,&comm);
2587: MPI_Comm_size(comm,&size);
2589: /* compute atbarray = aseq^T * bseq */
2590: MatTransposeMatMult(a->A,b->A,atb->atb ? MAT_REUSE_MATRIX : MAT_INITIAL_MATRIX,PETSC_DEFAULT,&atb->atb);
2592: MatGetOwnershipRanges(C,&ranges);
2594: /* arrange atbarray into sendbuf */
2595: MatDenseGetArrayRead(atb->atb,&atbarray);
2596: for (proc=0, k=0; proc<size; proc++) {
2597: for (j=0; j<cN; j++) {
2598: for (i=ranges[proc]; i<ranges[proc+1]; i++) sendbuf[k++] = atbarray[i+j*cM];
2599: }
2600: }
2601: MatDenseRestoreArrayRead(atb->atb,&atbarray);
2603: /* sum all atbarray to local values of C */
2604: MatDenseGetArrayWrite(c->A,&carray);
2605: MPI_Reduce_scatter(sendbuf,carray,recvcounts,MPIU_SCALAR,MPIU_SUM,comm);
2606: MatDenseRestoreArrayWrite(c->A,&carray);
2607: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
2608: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
2609: return(0);
2610: }
2612: static PetscErrorCode MatTransposeMatMultSymbolic_MPIDense_MPIDense(Mat A,Mat B,PetscReal fill,Mat C)
2613: {
2614: PetscErrorCode ierr;
2615: MPI_Comm comm;
2616: PetscMPIInt size;
2617: PetscInt cm=A->cmap->n,cM,cN=B->cmap->N;
2618: Mat_TransMatMultDense *atb;
2619: PetscBool cisdense;
2620: PetscInt i;
2621: const PetscInt *ranges;
2624: MatCheckProduct(C,3);
2625: if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
2626: PetscObjectGetComm((PetscObject)A,&comm);
2627: if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend) {
2628: SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != B (%D,%D)",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);
2629: }
2631: /* create matrix product C */
2632: MatSetSizes(C,cm,B->cmap->n,A->cmap->N,B->cmap->N);
2633: PetscObjectTypeCompareAny((PetscObject)C,&cisdense,MATMPIDENSE,MATMPIDENSECUDA,"");
2634: if (!cisdense) {
2635: MatSetType(C,((PetscObject)A)->type_name);
2636: }
2637: MatSetUp(C);
2639: /* create data structure for reuse C */
2640: MPI_Comm_size(comm,&size);
2641: PetscNew(&atb);
2642: cM = C->rmap->N;
2643: PetscMalloc2((size_t)cM*(size_t)cN,&atb->sendbuf,size,&atb->recvcounts);
2644: MatGetOwnershipRanges(C,&ranges);
2645: for (i=0; i<size; i++) atb->recvcounts[i] = (ranges[i+1] - ranges[i])*cN;
2647: C->product->data = atb;
2648: C->product->destroy = MatDestroy_MatTransMatMult_MPIDense_MPIDense;
2649: return(0);
2650: }
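/*
   Editorial usage sketch: forming C = A^T * B for two assembled MATMPIDENSE matrices
   with identical row layouts, which exercises the symbolic and numeric kernels
   above.  The second call reuses the product data created by the first.

     Mat C;
     MatTransposeMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
     MatTransposeMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);
     MatDestroy(&C);
*/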
2652: static PetscErrorCode MatMatTransposeMultSymbolic_MPIDense_MPIDense(Mat A, Mat B, PetscReal fill, Mat C)
2653: {
2654: PetscErrorCode ierr;
2655: MPI_Comm comm;
2656: PetscMPIInt i, size;
2657: PetscInt maxRows, bufsiz;
2658: PetscMPIInt tag;
2659: PetscInt alg;
2660: Mat_MatTransMultDense *abt;
2661: Mat_Product *product = C->product;
2662: PetscBool flg;
2665: MatCheckProduct(C,4);
2666: if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
2667: /* check local size of A and B */
2668: if (A->cmap->n != B->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local column dimensions are incompatible, A (%D) != B (%D)",A->cmap->n,B->cmap->n);
2670: PetscStrcmp(product->alg,"allgatherv",&flg);
2671: alg = flg ? 0 : 1;
2673: /* setup matrix product C */
2674: MatSetSizes(C,A->rmap->n,B->rmap->n,A->rmap->N,B->rmap->N);
2675: MatSetType(C,MATMPIDENSE);
2676: MatSetUp(C);
2677: PetscObjectGetNewTag((PetscObject)C,&tag);
2679: /* create data structure for reuse C */
2680: PetscObjectGetComm((PetscObject)C,&comm);
2681: MPI_Comm_size(comm,&size);
2682: PetscNew(&abt);
2683: abt->tag = tag;
2684: abt->alg = alg;
2685: switch (alg) {
2686: case 1: /* alg: "cyclic" */
2687: for (maxRows = 0, i = 0; i < size; i++) maxRows = PetscMax(maxRows, (B->rmap->range[i + 1] - B->rmap->range[i]));
2688: bufsiz = A->cmap->N * maxRows;
2689: PetscMalloc2(bufsiz,&(abt->buf[0]),bufsiz,&(abt->buf[1]));
2690: break;
2691: default: /* alg: "allgatherv" */
2692: PetscMalloc2(B->rmap->n * B->cmap->N, &(abt->buf[0]), B->rmap->N * B->cmap->N, &(abt->buf[1]));
2693: PetscMalloc2(size,&(abt->recvcounts),size+1,&(abt->recvdispls));
2694: for (i = 0; i <= size; i++) abt->recvdispls[i] = B->rmap->range[i] * A->cmap->N;
2695: for (i = 0; i < size; i++) abt->recvcounts[i] = abt->recvdispls[i + 1] - abt->recvdispls[i];
2696: break;
2697: }
2699: C->product->data = abt;
2700: C->product->destroy = MatDestroy_MatMatTransMult_MPIDense_MPIDense;
2701: C->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_MPIDense_MPIDense;
2702: return(0);
2703: }
2705: static PetscErrorCode MatMatTransposeMultNumeric_MPIDense_MPIDense_Cyclic(Mat A, Mat B, Mat C)
2706: {
2707: Mat_MPIDense *a=(Mat_MPIDense*)A->data, *b=(Mat_MPIDense*)B->data, *c=(Mat_MPIDense*)C->data;
2708: Mat_MatTransMultDense *abt;
2709: PetscErrorCode ierr;
2710: MPI_Comm comm;
2711: PetscMPIInt rank,size, sendsiz, recvsiz, sendto, recvfrom, recvisfrom;
2712: PetscScalar *sendbuf, *recvbuf=NULL, *cv;
2713: PetscInt i,cK=A->cmap->N,k,j,bn;
2714: PetscScalar _DOne=1.0,_DZero=0.0;
2715: const PetscScalar *av,*bv;
2716: PetscBLASInt cm, cn, ck, alda, blda = 0, clda;
2717: MPI_Request reqs[2];
2718: const PetscInt *ranges;
2721: MatCheckProduct(C,3);
2722: if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
2723: abt = (Mat_MatTransMultDense*)C->product->data;
2724: PetscObjectGetComm((PetscObject)C,&comm);
2725: MPI_Comm_rank(comm,&rank);
2726: MPI_Comm_size(comm,&size);
2727: MatDenseGetArrayRead(a->A,&av);
2728: MatDenseGetArrayRead(b->A,&bv);
2729: MatDenseGetArrayWrite(c->A,&cv);
2730: MatDenseGetLDA(a->A,&i);
2731: PetscBLASIntCast(i,&alda);
2732: MatDenseGetLDA(b->A,&i);
2733: PetscBLASIntCast(i,&blda);
2734: MatDenseGetLDA(c->A,&i);
2735: PetscBLASIntCast(i,&clda);
2736: MatGetOwnershipRanges(B,&ranges);
2737: bn = B->rmap->n;
2738: if (blda == bn) {
2739: sendbuf = (PetscScalar*)bv;
2740: } else {
2741: sendbuf = abt->buf[0];
2742: for (k = 0, i = 0; i < cK; i++) {
2743: for (j = 0; j < bn; j++, k++) {
2744: sendbuf[k] = bv[i * blda + j];
2745: }
2746: }
2747: }
2748: if (size > 1) {
2749: sendto = (rank + size - 1) % size;
2750: recvfrom = (rank + size + 1) % size;
2751: } else {
2752: sendto = recvfrom = 0;
2753: }
2754: PetscBLASIntCast(cK,&ck);
2755: PetscBLASIntCast(c->A->rmap->n,&cm);
2756: recvisfrom = rank;
2757: for (i = 0; i < size; i++) {
2758: /* we have finished receiving and sending, bufs can be read/modified */
2759: PetscInt nextrecvisfrom = (recvisfrom + 1) % size; /* which process the next recvbuf will originate on */
2760: PetscInt nextbn = ranges[nextrecvisfrom + 1] - ranges[nextrecvisfrom];
2762: if (nextrecvisfrom != rank) {
2763: /* start the cyclic sends from sendbuf, to recvbuf (which will switch to sendbuf) */
2764: sendsiz = cK * bn;
2765: recvsiz = cK * nextbn;
2766: recvbuf = (i & 1) ? abt->buf[0] : abt->buf[1];
2767: MPI_Isend(sendbuf, sendsiz, MPIU_SCALAR, sendto, abt->tag, comm, &reqs[0]);
2768: MPI_Irecv(recvbuf, recvsiz, MPIU_SCALAR, recvfrom, abt->tag, comm, &reqs[1]);
2769: }
2771: /* local aseq * sendbuf^T */
2772: PetscBLASIntCast(ranges[recvisfrom + 1] - ranges[recvisfrom], &cn);
2773: if (cm && cn && ck) PetscStackCallBLAS("BLASgemm",BLASgemm_("N","T",&cm,&cn,&ck,&_DOne,av,&alda,sendbuf,&cn,&_DZero,cv + clda * ranges[recvisfrom],&clda));
2775: if (nextrecvisfrom != rank) {
2776: /* wait for the sends and receives to complete, swap sendbuf and recvbuf */
2777: MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
2778: }
2779: bn = nextbn;
2780: recvisfrom = nextrecvisfrom;
2781: sendbuf = recvbuf;
2782: }
2783: MatDenseRestoreArrayRead(a->A,&av);
2784: MatDenseRestoreArrayRead(b->A,&bv);
2785: MatDenseRestoreArrayWrite(c->A,&cv);
2786: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
2787: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
2788: return(0);
2789: }
2791: static PetscErrorCode MatMatTransposeMultNumeric_MPIDense_MPIDense_Allgatherv(Mat A, Mat B, Mat C)
2792: {
2793: Mat_MPIDense *a=(Mat_MPIDense*)A->data, *b=(Mat_MPIDense*)B->data, *c=(Mat_MPIDense*)C->data;
2794: Mat_MatTransMultDense *abt;
2795: PetscErrorCode ierr;
2796: MPI_Comm comm;
2797: PetscMPIInt size;
2798: PetscScalar *cv, *sendbuf, *recvbuf;
2799: const PetscScalar *av,*bv;
2800: PetscInt blda,i,cK=A->cmap->N,k,j,bn;
2801: PetscScalar _DOne=1.0,_DZero=0.0;
2802: PetscBLASInt cm, cn, ck, alda, clda;
2805: MatCheckProduct(C,3);
2806: if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
2807: abt = (Mat_MatTransMultDense*)C->product->data;
2808: PetscObjectGetComm((PetscObject)A,&comm);
2809: MPI_Comm_size(comm,&size);
2810: MatDenseGetArrayRead(a->A,&av);
2811: MatDenseGetArrayRead(b->A,&bv);
2812: MatDenseGetArrayWrite(c->A,&cv);
2813: MatDenseGetLDA(a->A,&i);
2814: PetscBLASIntCast(i,&alda);
2815: MatDenseGetLDA(b->A,&blda);
2816: MatDenseGetLDA(c->A,&i);
2817: PetscBLASIntCast(i,&clda);
2818: /* copy transpose of B into buf[0] */
2819: bn = B->rmap->n;
2820: sendbuf = abt->buf[0];
2821: recvbuf = abt->buf[1];
2822: for (k = 0, j = 0; j < bn; j++) {
2823: for (i = 0; i < cK; i++, k++) {
2824: sendbuf[k] = bv[i * blda + j];
2825: }
2826: }
2827: MatDenseRestoreArrayRead(b->A,&bv);
2828: MPI_Allgatherv(sendbuf, bn * cK, MPIU_SCALAR, recvbuf, abt->recvcounts, abt->recvdispls, MPIU_SCALAR, comm);
2829: PetscBLASIntCast(cK,&ck);
2830: PetscBLASIntCast(c->A->rmap->n,&cm);
2831: PetscBLASIntCast(c->A->cmap->n,&cn);
2832: if (cm && cn && ck) PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&cm,&cn,&ck,&_DOne,av,&alda,recvbuf,&ck,&_DZero,cv,&clda));
2833: MatDenseRestoreArrayRead(a->A,&av);
2835: MatDenseRestoreArrayWrite(c->A,&cv);
2836: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
2837: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
2838: return(0);
2839: }
2841: static PetscErrorCode MatMatTransposeMultNumeric_MPIDense_MPIDense(Mat A, Mat B, Mat C)
2842: {
2843: Mat_MatTransMultDense *abt;
2844: PetscErrorCode ierr;
2847: MatCheckProduct(C,3);
2848: if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
2849: abt = (Mat_MatTransMultDense*)C->product->data;
2850: switch (abt->alg) {
2851: case 1:
2852: MatMatTransposeMultNumeric_MPIDense_MPIDense_Cyclic(A, B, C);
2853: break;
2854: default:
2855: MatMatTransposeMultNumeric_MPIDense_MPIDense_Allgatherv(A, B, C);
2856: break;
2857: }
2858: return(0);
2859: }
2861: PetscErrorCode MatDestroy_MatMatMult_MPIDense_MPIDense(void *data)
2862: {
2863: PetscErrorCode ierr;
2864: Mat_MatMultDense *ab = (Mat_MatMultDense*)data;
2867: MatDestroy(&ab->Ce);
2868: MatDestroy(&ab->Ae);
2869: MatDestroy(&ab->Be);
2870: PetscFree(ab);
2871: return(0);
2872: }
2874: #if defined(PETSC_HAVE_ELEMENTAL)
2875: PetscErrorCode MatMatMultNumeric_MPIDense_MPIDense(Mat A,Mat B,Mat C)
2876: {
2877: PetscErrorCode ierr;
2878: Mat_MatMultDense *ab;
2881: MatCheckProduct(C,3);
2882: if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing product data");
2883: ab = (Mat_MatMultDense*)C->product->data;
2884: MatConvert_MPIDense_Elemental(A,MATELEMENTAL,MAT_REUSE_MATRIX, &ab->Ae);
2885: MatConvert_MPIDense_Elemental(B,MATELEMENTAL,MAT_REUSE_MATRIX, &ab->Be);
2886: MatMatMultNumeric_Elemental(ab->Ae,ab->Be,ab->Ce);
2887: MatConvert(ab->Ce,MATMPIDENSE,MAT_REUSE_MATRIX,&C);
2888: return(0);
2889: }
2891: static PetscErrorCode MatMatMultSymbolic_MPIDense_MPIDense(Mat A,Mat B,PetscReal fill,Mat C)
2892: {
2893: PetscErrorCode ierr;
2894: Mat Ae,Be,Ce;
2895: Mat_MatMultDense *ab;
2898: MatCheckProduct(C,4);
2899: if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
2900: /* check local size of A and B */
2901: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
2902: SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != B (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
2903: }
2905: /* create elemental matrices Ae and Be */
2906: MatCreate(PetscObjectComm((PetscObject)A), &Ae);
2907: MatSetSizes(Ae,PETSC_DECIDE,PETSC_DECIDE,A->rmap->N,A->cmap->N);
2908: MatSetType(Ae,MATELEMENTAL);
2909: MatSetUp(Ae);
2910: MatSetOption(Ae,MAT_ROW_ORIENTED,PETSC_FALSE);
2912: MatCreate(PetscObjectComm((PetscObject)B), &Be);
2913: MatSetSizes(Be,PETSC_DECIDE,PETSC_DECIDE,B->rmap->N,B->cmap->N);
2914: MatSetType(Be,MATELEMENTAL);
2915: MatSetUp(Be);
2916: MatSetOption(Be,MAT_ROW_ORIENTED,PETSC_FALSE);
2918: /* compute symbolic Ce = Ae*Be */
2919: MatCreate(PetscObjectComm((PetscObject)C),&Ce);
2920: MatMatMultSymbolic_Elemental(Ae,Be,fill,Ce);
2922: /* setup C */
2923: MatSetSizes(C,A->rmap->n,B->cmap->n,PETSC_DECIDE,PETSC_DECIDE);
2924: MatSetType(C,MATDENSE);
2925: MatSetUp(C);
2927: /* create data structure for reuse Cdense */
2928: PetscNew(&ab);
2929: ab->Ae = Ae;
2930: ab->Be = Be;
2931: ab->Ce = Ce;
2933: C->product->data = ab;
2934: C->product->destroy = MatDestroy_MatMatMult_MPIDense_MPIDense;
2935: C->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIDense;
2936: return(0);
2937: }
2938: #endif
2939: /* ----------------------------------------------- */
2940: #if defined(PETSC_HAVE_ELEMENTAL)
2941: static PetscErrorCode MatProductSetFromOptions_MPIDense_AB(Mat C)
2942: {
2944: C->ops->matmultsymbolic = MatMatMultSymbolic_MPIDense_MPIDense;
2945: C->ops->productsymbolic = MatProductSymbolic_AB;
2946: return(0);
2947: }
2948: #endif
2950: static PetscErrorCode MatProductSetFromOptions_MPIDense_AtB(Mat C)
2951: {
2952: Mat_Product *product = C->product;
2953: Mat A = product->A,B=product->B;
2956: if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend)
2957: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);
2958: C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIDense_MPIDense;
2959: C->ops->productsymbolic = MatProductSymbolic_AtB;
2960: return(0);
2961: }
2963: static PetscErrorCode MatProductSetFromOptions_MPIDense_ABt(Mat C)
2964: {
2966: Mat_Product *product = C->product;
2967: const char *algTypes[2] = {"allgatherv","cyclic"};
2968: PetscInt alg,nalg = 2;
2969: PetscBool flg = PETSC_FALSE;
2972: /* Set default algorithm */
2973: alg = 0; /* default is allgatherv */
2974: PetscStrcmp(product->alg,"default",&flg);
2975: if (flg) {
2976: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2977: }
2979: /* Get runtime option */
2980: if (product->api_user) {
2981: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatTransposeMult","Mat");
2982: PetscOptionsEList("-matmattransmult_mpidense_mpidense_via","Algorithmic approach","MatMatTransposeMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2983: PetscOptionsEnd();
2984: } else {
2985: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_ABt","Mat");
2986: PetscOptionsEList("-matproduct_abt_mpidense_mpidense_via","Algorithmic approach","MatProduct_ABt",algTypes,nalg,algTypes[alg],&alg,&flg);
2987: PetscOptionsEnd();
2988: }
2989: if (flg) {
2990: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2991: }
2993: C->ops->mattransposemultsymbolic = MatMatTransposeMultSymbolic_MPIDense_MPIDense;
2994: C->ops->productsymbolic = MatProductSymbolic_ABt;
2995: return(0);
2996: }
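/*
   Editorial usage sketch: requesting the cyclic communication algorithm for the
   C = A*B^T product programmatically rather than through the options database keys
   handled above.  A and B are assumed to be assembled MATMPIDENSE matrices with the
   same number of columns.

     Mat C;
     MatProductCreate(A,B,NULL,&C);
     MatProductSetType(C,MATPRODUCT_ABt);
     MatProductSetAlgorithm(C,"cyclic");
     MatProductSetFromOptions(C);
     MatProductSymbolic(C);
     MatProductNumeric(C);
     MatDestroy(&C);
*/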
2998: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIDense(Mat C)
2999: {
3001: Mat_Product *product = C->product;
3004: switch (product->type) {
3005: #if defined(PETSC_HAVE_ELEMENTAL)
3006: case MATPRODUCT_AB:
3007: MatProductSetFromOptions_MPIDense_AB(C);
3008: break;
3009: #endif
3010: case MATPRODUCT_AtB:
3011: MatProductSetFromOptions_MPIDense_AtB(C);
3012: break;
3013: case MATPRODUCT_ABt:
3014: MatProductSetFromOptions_MPIDense_ABt(C);
3015: break;
3016: default:
3017: break;
3018: }
3019: return(0);
3020: }