Actual source code: mpiaij.c
petsc-3.11.0 2019-03-29
3: #include <../src/mat/impls/aij/mpi/mpiaij.h>
4: #include <petsc/private/vecimpl.h>
5: #include <petsc/private/vecscatterimpl.h>
6: #include <petsc/private/isimpl.h>
7: #include <petscblaslapack.h>
8: #include <petscsf.h>
10: /*MC
11: MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.
13: This matrix type is identical to MATSEQAIJ when constructed with a single process communicator,
14: and MATMPIAIJ otherwise. As a result, for single process communicators,
15: MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
16: for communicators controlling multiple processes. It is recommended that you call both of
17: the above preallocation routines for simplicity.
19: Options Database Keys:
20: . -mat_type aij - sets the matrix type to "aij" during a call to MatSetFromOptions()
22: Developer Notes:
23: Subclasses include MATAIJCUSP, MATAIJCUSPARSE, MATAIJPERM, MATAIJSELL, MATAIJMKL, and MATAIJCRL; the type also automatically switches over to using inodes when
24: enough of them exist.
26: Level: beginner
28: .seealso: MatCreateAIJ(), MatCreateSeqAIJ(), MATSEQAIJ, MATMPIAIJ
29: M*/
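/*
   Illustrative usage sketch (not part of the PETSc source): creating an AIJ matrix and
   calling both preallocation routines as recommended above. The sizes and per-row counts
   below are placeholders chosen purely for illustration.

     Mat            mat;
     PetscErrorCode ierr;

     ierr = MatCreate(PETSC_COMM_WORLD,&mat);CHKERRQ(ierr);
     ierr = MatSetSizes(mat,PETSC_DECIDE,PETSC_DECIDE,100,100);CHKERRQ(ierr);
     ierr = MatSetType(mat,MATAIJ);CHKERRQ(ierr);
     ierr = MatSetFromOptions(mat);CHKERRQ(ierr);
     ierr = MatSeqAIJSetPreallocation(mat,5,NULL);CHKERRQ(ierr);          used on one process
     ierr = MatMPIAIJSetPreallocation(mat,5,NULL,2,NULL);CHKERRQ(ierr);   used on several processes
     ... insert entries with MatSetValues() ...
     ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatDestroy(&mat);CHKERRQ(ierr);
*/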
31: /*MC
32: MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.
34: This matrix type is identical to MATSEQAIJCRL when constructed with a single process communicator,
35: and MATMPIAIJCRL otherwise. As a result, for single process communicators,
36: MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
37: for communicators controlling multiple processes. It is recommended that you call both of
38: the above preallocation routines for simplicity.
40: Options Database Keys:
41: . -mat_type aijcrl - sets the matrix type to "aijcrl" during a call to MatSetFromOptions()
43: Level: beginner
45: .seealso: MatCreateMPIAIJCRL(), MATSEQAIJCRL, MATMPIAIJCRL
46: M*/
48: PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
49: {
51: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)M->data;
54: if (mat->A) {
55: MatSetBlockSizes(mat->A,rbs,cbs);
56: MatSetBlockSizes(mat->B,rbs,1);
57: }
58: return(0);
59: }
61: PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M,IS *keptrows)
62: {
63: PetscErrorCode ierr;
64: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)M->data;
65: Mat_SeqAIJ *a = (Mat_SeqAIJ*)mat->A->data;
66: Mat_SeqAIJ *b = (Mat_SeqAIJ*)mat->B->data;
67: const PetscInt *ia,*ib;
68: const MatScalar *aa,*bb;
69: PetscInt na,nb,i,j,*rows,cnt=0,n0rows;
70: PetscInt m = M->rmap->n,rstart = M->rmap->rstart;
73: *keptrows = 0;
74: ia = a->i;
75: ib = b->i;
76: for (i=0; i<m; i++) {
77: na = ia[i+1] - ia[i];
78: nb = ib[i+1] - ib[i];
79: if (!na && !nb) {
80: cnt++;
81: goto ok1;
82: }
83: aa = a->a + ia[i];
84: for (j=0; j<na; j++) {
85: if (aa[j] != 0.0) goto ok1;
86: }
87: bb = b->a + ib[i];
88: for (j=0; j <nb; j++) {
89: if (bb[j] != 0.0) goto ok1;
90: }
91: cnt++;
92: ok1:;
93: }
94: MPIU_Allreduce(&cnt,&n0rows,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)M));
95: if (!n0rows) return(0);
96: PetscMalloc1(M->rmap->n-cnt,&rows);
97: cnt = 0;
98: for (i=0; i<m; i++) {
99: na = ia[i+1] - ia[i];
100: nb = ib[i+1] - ib[i];
101: if (!na && !nb) continue;
102: aa = a->a + ia[i];
103: for (j=0; j<na;j++) {
104: if (aa[j] != 0.0) {
105: rows[cnt++] = rstart + i;
106: goto ok2;
107: }
108: }
109: bb = b->a + ib[i];
110: for (j=0; j<nb; j++) {
111: if (bb[j] != 0.0) {
112: rows[cnt++] = rstart + i;
113: goto ok2;
114: }
115: }
116: ok2:;
117: }
118: ISCreateGeneral(PetscObjectComm((PetscObject)M),cnt,rows,PETSC_OWN_POINTER,keptrows);
119: return(0);
120: }
122: PetscErrorCode MatDiagonalSet_MPIAIJ(Mat Y,Vec D,InsertMode is)
123: {
124: PetscErrorCode ierr;
125: Mat_MPIAIJ *aij = (Mat_MPIAIJ*) Y->data;
126: PetscBool cong;
129: MatHasCongruentLayouts(Y,&cong);
130: if (Y->assembled && cong) {
131: MatDiagonalSet(aij->A,D,is);
132: } else {
133: MatDiagonalSet_Default(Y,D,is);
134: }
135: return(0);
136: }
138: PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M,IS *zrows)
139: {
140: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)M->data;
142: PetscInt i,rstart,nrows,*rows;
145: *zrows = NULL;
146: MatFindZeroDiagonals_SeqAIJ_Private(aij->A,&nrows,&rows);
147: MatGetOwnershipRange(M,&rstart,NULL);
148: for (i=0; i<nrows; i++) rows[i] += rstart;
149: ISCreateGeneral(PetscObjectComm((PetscObject)M),nrows,rows,PETSC_OWN_POINTER,zrows);
150: return(0);
151: }
153: PetscErrorCode MatGetColumnNorms_MPIAIJ(Mat A,NormType type,PetscReal *norms)
154: {
156: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
157: PetscInt i,n,*garray = aij->garray;
158: Mat_SeqAIJ *a_aij = (Mat_SeqAIJ*) aij->A->data;
159: Mat_SeqAIJ *b_aij = (Mat_SeqAIJ*) aij->B->data;
160: PetscReal *work;
163: MatGetSize(A,NULL,&n);
164: PetscCalloc1(n,&work);
165: if (type == NORM_2) {
166: for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
167: work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]*a_aij->a[i]);
168: }
169: for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
170: work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]*b_aij->a[i]);
171: }
172: } else if (type == NORM_1) {
173: for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
174: work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
175: }
176: for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
177: work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
178: }
179: } else if (type == NORM_INFINITY) {
180: for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
181: work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
182: }
183: for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
184: work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]),work[garray[b_aij->j[i]]]);
185: }
187: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Unknown NormType");
188: if (type == NORM_INFINITY) {
189: MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));
190: } else {
191: MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
192: }
193: PetscFree(work);
194: if (type == NORM_2) {
195: for (i=0; i<n; i++) norms[i] = PetscSqrtReal(norms[i]);
196: }
197: return(0);
198: }
200: PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A,IS *is)
201: {
202: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
203: IS sis,gis;
204: PetscErrorCode ierr;
205: const PetscInt *isis,*igis;
206: PetscInt n,*iis,nsis,ngis,rstart,i;
209: MatFindOffBlockDiagonalEntries(a->A,&sis);
210: MatFindNonzeroRows(a->B,&gis);
211: ISGetSize(gis,&ngis);
212: ISGetSize(sis,&nsis);
213: ISGetIndices(sis,&isis);
214: ISGetIndices(gis,&igis);
216: PetscMalloc1(ngis+nsis,&iis);
217: PetscMemcpy(iis,igis,ngis*sizeof(PetscInt));
218: PetscMemcpy(iis+ngis,isis,nsis*sizeof(PetscInt));
219: n = ngis + nsis;
220: PetscSortRemoveDupsInt(&n,iis);
221: MatGetOwnershipRange(A,&rstart,NULL);
222: for (i=0; i<n; i++) iis[i] += rstart;
223: ISCreateGeneral(PetscObjectComm((PetscObject)A),n,iis,PETSC_OWN_POINTER,is);
225: ISRestoreIndices(sis,&isis);
226: ISRestoreIndices(gis,&igis);
227: ISDestroy(&sis);
228: ISDestroy(&gis);
229: return(0);
230: }
232: /*
233: Distributes a SeqAIJ matrix across a set of processes. Code stolen from
234: MatLoad_MPIAIJ(). Horrible lack of reuse. Should be a routine for each matrix type.
236: Only for square matrices
238: Used by a preconditioner, hence PETSC_EXTERN
239: */
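/*
   Illustrative usage sketch (not part of the PETSc source; gmat, dmat and mlocal are
   placeholder names): gmat is a square SeqAIJ matrix that is significant only on
   process 0 of comm, and mlocal is the number of rows (= columns) this process should
   own in the distributed copy.

     Mat dmat;
     ierr = MatDistribute_MPIAIJ(comm,gmat,mlocal,MAT_INITIAL_MATRIX,&dmat);CHKERRQ(ierr);

   A later call with MAT_REUSE_MATRIX and the same dmat only moves the numerical values
   over from process 0; the nonzero structure set up by the first call is kept.
*/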
240: PETSC_EXTERN PetscErrorCode MatDistribute_MPIAIJ(MPI_Comm comm,Mat gmat,PetscInt m,MatReuse reuse,Mat *inmat)
241: {
242: PetscMPIInt rank,size;
243: PetscInt *rowners,*dlens,*olens,i,rstart,rend,j,jj,nz = 0,*gmataj,cnt,row,*ld,bses[2];
245: Mat mat;
246: Mat_SeqAIJ *gmata;
247: PetscMPIInt tag;
248: MPI_Status status;
249: PetscBool aij;
250: MatScalar *gmataa,*ao,*ad,*gmataarestore=0;
253: MPI_Comm_rank(comm,&rank);
254: MPI_Comm_size(comm,&size);
255: if (!rank) {
256: PetscObjectTypeCompare((PetscObject)gmat,MATSEQAIJ,&aij);
257: if (!aij) SETERRQ1(PetscObjectComm((PetscObject)gmat),PETSC_ERR_SUP,"Currently no support for input matrix of type %s\n",((PetscObject)gmat)->type_name);
258: }
259: if (reuse == MAT_INITIAL_MATRIX) {
260: MatCreate(comm,&mat);
261: MatSetSizes(mat,m,m,PETSC_DETERMINE,PETSC_DETERMINE);
262: MatGetBlockSizes(gmat,&bses[0],&bses[1]);
263: MPI_Bcast(bses,2,MPIU_INT,0,comm);
264: MatSetBlockSizes(mat,bses[0],bses[1]);
265: MatSetType(mat,MATAIJ);
266: PetscMalloc1(size+1,&rowners);
267: PetscMalloc2(m,&dlens,m,&olens);
268: MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);
270: rowners[0] = 0;
271: for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
272: rstart = rowners[rank];
273: rend = rowners[rank+1];
274: PetscObjectGetNewTag((PetscObject)mat,&tag);
275: if (!rank) {
276: gmata = (Mat_SeqAIJ*) gmat->data;
277: /* send row lengths to all processors */
278: for (i=0; i<m; i++) dlens[i] = gmata->ilen[i];
279: for (i=1; i<size; i++) {
280: MPI_Send(gmata->ilen + rowners[i],rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
281: }
282: /* determine the number of diagonal and off-diagonal entries in each row */
283: PetscMemzero(olens,m*sizeof(PetscInt));
284: PetscCalloc1(m,&ld);
285: jj = 0;
286: for (i=0; i<m; i++) {
287: for (j=0; j<dlens[i]; j++) {
288: if (gmata->j[jj] < rstart) ld[i]++;
289: if (gmata->j[jj] < rstart || gmata->j[jj] >= rend) olens[i]++;
290: jj++;
291: }
292: }
293: /* send column indices to other processes */
294: for (i=1; i<size; i++) {
295: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
296: MPI_Send(&nz,1,MPIU_INT,i,tag,comm);
297: MPI_Send(gmata->j + gmata->i[rowners[i]],nz,MPIU_INT,i,tag,comm);
298: }
300: /* send numerical values to other processes */
301: for (i=1; i<size; i++) {
302: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
303: MPI_Send(gmata->a + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
304: }
305: gmataa = gmata->a;
306: gmataj = gmata->j;
308: } else {
309: /* receive row lengths */
310: MPI_Recv(dlens,m,MPIU_INT,0,tag,comm,&status);
311: /* receive column indices */
312: MPI_Recv(&nz,1,MPIU_INT,0,tag,comm,&status);
313: PetscMalloc2(nz,&gmataa,nz,&gmataj);
314: MPI_Recv(gmataj,nz,MPIU_INT,0,tag,comm,&status);
315: /* determine the number of diagonal and off-diagonal entries in each row */
316: PetscMemzero(olens,m*sizeof(PetscInt));
317: PetscCalloc1(m,&ld);
318: jj = 0;
319: for (i=0; i<m; i++) {
320: for (j=0; j<dlens[i]; j++) {
321: if (gmataj[jj] < rstart) ld[i]++;
322: if (gmataj[jj] < rstart || gmataj[jj] >= rend) olens[i]++;
323: jj++;
324: }
325: }
326: /* receive numerical values */
327: PetscMemzero(gmataa,nz*sizeof(PetscScalar));
328: MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
329: }
330: /* set preallocation */
331: for (i=0; i<m; i++) {
332: dlens[i] -= olens[i];
333: }
334: MatSeqAIJSetPreallocation(mat,0,dlens);
335: MatMPIAIJSetPreallocation(mat,0,dlens,0,olens);
337: for (i=0; i<m; i++) {
338: dlens[i] += olens[i];
339: }
340: cnt = 0;
341: for (i=0; i<m; i++) {
342: row = rstart + i;
343: MatSetValues(mat,1,&row,dlens[i],gmataj+cnt,gmataa+cnt,INSERT_VALUES);
344: cnt += dlens[i];
345: }
346: if (rank) {
347: PetscFree2(gmataa,gmataj);
348: }
349: PetscFree2(dlens,olens);
350: PetscFree(rowners);
352: ((Mat_MPIAIJ*)(mat->data))->ld = ld;
354: *inmat = mat;
355: } else { /* column indices are already set; only need to move over numerical values from process 0 */
356: Mat_SeqAIJ *Ad = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->A->data;
357: Mat_SeqAIJ *Ao = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->B->data;
358: mat = *inmat;
359: PetscObjectGetNewTag((PetscObject)mat,&tag);
360: if (!rank) {
361: /* send numerical values to other processes */
362: gmata = (Mat_SeqAIJ*) gmat->data;
363: MatGetOwnershipRanges(mat,(const PetscInt**)&rowners);
364: gmataa = gmata->a;
365: for (i=1; i<size; i++) {
366: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
367: MPI_Send(gmataa + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
368: }
369: nz = gmata->i[rowners[1]]-gmata->i[rowners[0]];
370: } else {
371: /* receive numerical values from process 0 */
372: nz = Ad->nz + Ao->nz;
373: PetscMalloc1(nz,&gmataa); gmataarestore = gmataa;
374: MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
375: }
376: /* transfer numerical values into the diagonal A and off diagonal B parts of mat */
377: ld = ((Mat_MPIAIJ*)(mat->data))->ld;
378: ad = Ad->a;
379: ao = Ao->a;
380: if (mat->rmap->n) {
381: i = 0;
382: nz = ld[i]; PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar)); ao += nz; gmataa += nz;
383: nz = Ad->i[i+1] - Ad->i[i]; PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar)); ad += nz; gmataa += nz;
384: }
385: for (i=1; i<mat->rmap->n; i++) {
386: nz = Ao->i[i] - Ao->i[i-1] - ld[i-1] + ld[i]; PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar)); ao += nz; gmataa += nz;
387: nz = Ad->i[i+1] - Ad->i[i]; PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar)); ad += nz; gmataa += nz;
388: }
389: i--;
390: if (mat->rmap->n) {
391: nz = Ao->i[i+1] - Ao->i[i] - ld[i]; PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));
392: }
393: if (rank) {
394: PetscFree(gmataarestore);
395: }
396: }
397: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
398: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
399: return(0);
400: }
402: /*
403: Local utility routine that creates a mapping from the global column
404: number to the local number in the off-diagonal part of the local
405: storage of the matrix. When PETSC_USE_CTABLE is defined this is scalable at
406: a slightly higher hash-table cost; without it, it is not scalable (each process
407: has an order-N integer array) but is fast to access.
408: */
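/*
   Illustrative example (not from the source): if the off-diagonal block B has three
   columns with global indices garray = {3, 7, 12}, the colmap built below records
   colmap[3] = 1, colmap[7] = 2, colmap[12] = 3 (or the equivalent PetscTable entries
   when PETSC_USE_CTABLE is defined), so a later lookup of global column 7 returns
   2 - 1 = 1, the local column index of that column within B.
*/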
409: PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
410: {
411: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
413: PetscInt n = aij->B->cmap->n,i;
416: if (!aij->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPIAIJ Matrix was assembled but is missing garray");
417: #if defined(PETSC_USE_CTABLE)
418: PetscTableCreate(n,mat->cmap->N+1,&aij->colmap);
419: for (i=0; i<n; i++) {
420: PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1,INSERT_VALUES);
421: }
422: #else
423: PetscCalloc1(mat->cmap->N+1,&aij->colmap);
424: PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N+1)*sizeof(PetscInt));
425: for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
426: #endif
427: return(0);
428: }
430: #define MatSetValues_SeqAIJ_A_Private(row,col,value,addv,orow,ocol) \
431: { \
432: if (col <= lastcol1) low1 = 0; \
433: else high1 = nrow1; \
434: lastcol1 = col;\
435: while (high1-low1 > 5) { \
436: t = (low1+high1)/2; \
437: if (rp1[t] > col) high1 = t; \
438: else low1 = t; \
439: } \
440: for (_i=low1; _i<high1; _i++) { \
441: if (rp1[_i] > col) break; \
442: if (rp1[_i] == col) { \
443: if (addv == ADD_VALUES) ap1[_i] += value; \
444: else ap1[_i] = value; \
445: goto a_noinsert; \
446: } \
447: } \
448: if (value == 0.0 && ignorezeroentries && row != col) {low1 = 0; high1 = nrow1;goto a_noinsert;} \
449: if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;} \
450: if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
451: MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
452: N = nrow1++ - 1; a->nz++; high1++; \
453: /* shift up all the later entries in this row */ \
454: for (ii=N; ii>=_i; ii--) { \
455: rp1[ii+1] = rp1[ii]; \
456: ap1[ii+1] = ap1[ii]; \
457: } \
458: rp1[_i] = col; \
459: ap1[_i] = value; \
460: A->nonzerostate++;\
461: a_noinsert: ; \
462: ailen[row] = nrow1; \
463: }
465: #define MatSetValues_SeqAIJ_B_Private(row,col,value,addv,orow,ocol) \
466: { \
467: if (col <= lastcol2) low2 = 0; \
468: else high2 = nrow2; \
469: lastcol2 = col; \
470: while (high2-low2 > 5) { \
471: t = (low2+high2)/2; \
472: if (rp2[t] > col) high2 = t; \
473: else low2 = t; \
474: } \
475: for (_i=low2; _i<high2; _i++) { \
476: if (rp2[_i] > col) break; \
477: if (rp2[_i] == col) { \
478: if (addv == ADD_VALUES) ap2[_i] += value; \
479: else ap2[_i] = value; \
480: goto b_noinsert; \
481: } \
482: } \
483: if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
484: if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
485: if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
486: MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
487: N = nrow2++ - 1; b->nz++; high2++; \
488: /* shift up all the later entries in this row */ \
489: for (ii=N; ii>=_i; ii--) { \
490: rp2[ii+1] = rp2[ii]; \
491: ap2[ii+1] = ap2[ii]; \
492: } \
493: rp2[_i] = col; \
494: ap2[_i] = value; \
495: B->nonzerostate++; \
496: b_noinsert: ; \
497: bilen[row] = nrow2; \
498: }
500: PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
501: {
502: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)A->data;
503: Mat_SeqAIJ *a = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
505: PetscInt l,*garray = mat->garray,diag;
508: /* code only works for square matrices A */
510: /* find the number of entries in this row that lie to the left of the diagonal block */
511: MatGetOwnershipRange(A,&diag,0);
512: row = row - diag;
513: for (l=0; l<b->i[row+1]-b->i[row]; l++) {
514: if (garray[b->j[b->i[row]+l]] > diag) break;
515: }
516: PetscMemcpy(b->a+b->i[row],v,l*sizeof(PetscScalar));
518: /* diagonal part */
519: PetscMemcpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row])*sizeof(PetscScalar));
521: /* right of diagonal part */
522: PetscMemcpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],(b->i[row+1]-b->i[row]-l)*sizeof(PetscScalar));
523: return(0);
524: }
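/*
   Illustrative example for MatSetValuesRow_MPIAIJ() above (numbers chosen purely for
   illustration): suppose this process owns rows and columns [3,7) of a square matrix
   and the given row has nonzeros in global columns {1, 4, 5, 9}. Then B (off-diagonal)
   holds columns {1, 9}, A (diagonal) holds columns {4, 5}, and l is computed as 1
   since exactly one B entry lies to the left of the owned block. Consequently v[0] is
   copied into B, v[1..2] into A, and v[3] into the remainder of B, which is why the
   caller must pass v ordered by global column index.
*/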
526: PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
527: {
528: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
529: PetscScalar value;
531: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
532: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
533: PetscBool roworiented = aij->roworiented;
535: /* Some variables required by the macros below */
536: Mat A = aij->A;
537: Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
538: PetscInt *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
539: MatScalar *aa = a->a;
540: PetscBool ignorezeroentries = a->ignorezeroentries;
541: Mat B = aij->B;
542: Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
543: PetscInt *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
544: MatScalar *ba = b->a;
546: PetscInt *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
547: PetscInt nonew;
548: MatScalar *ap1,*ap2;
551: for (i=0; i<m; i++) {
552: if (im[i] < 0) continue;
553: #if defined(PETSC_USE_DEBUG)
554: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
555: #endif
556: if (im[i] >= rstart && im[i] < rend) {
557: row = im[i] - rstart;
558: lastcol1 = -1;
559: rp1 = aj + ai[row];
560: ap1 = aa + ai[row];
561: rmax1 = aimax[row];
562: nrow1 = ailen[row];
563: low1 = 0;
564: high1 = nrow1;
565: lastcol2 = -1;
566: rp2 = bj + bi[row];
567: ap2 = ba + bi[row];
568: rmax2 = bimax[row];
569: nrow2 = bilen[row];
570: low2 = 0;
571: high2 = nrow2;
573: for (j=0; j<n; j++) {
574: if (roworiented) value = v[i*n+j];
575: else value = v[i+j*m];
576: if (in[j] >= cstart && in[j] < cend) {
577: col = in[j] - cstart;
578: nonew = a->nonew;
579: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && row != col) continue;
580: MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
581: } else if (in[j] < 0) continue;
582: #if defined(PETSC_USE_DEBUG)
583: else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
584: #endif
585: else {
586: if (mat->was_assembled) {
587: if (!aij->colmap) {
588: MatCreateColmap_MPIAIJ_Private(mat);
589: }
590: #if defined(PETSC_USE_CTABLE)
591: PetscTableFind(aij->colmap,in[j]+1,&col);
592: col--;
593: #else
594: col = aij->colmap[in[j]] - 1;
595: #endif
596: if (col < 0 && !((Mat_SeqAIJ*)(aij->B->data))->nonew) {
597: MatDisAssemble_MPIAIJ(mat);
598: col = in[j];
599: /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
600: B = aij->B;
601: b = (Mat_SeqAIJ*)B->data;
602: bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
603: rp2 = bj + bi[row];
604: ap2 = ba + bi[row];
605: rmax2 = bimax[row];
606: nrow2 = bilen[row];
607: low2 = 0;
608: high2 = nrow2;
609: bm = aij->B->rmap->n;
610: ba = b->a;
611: } else if (col < 0) {
612: if (1 == ((Mat_SeqAIJ*)(aij->B->data))->nonew) {
613: PetscInfo3(mat,"Skipping of insertion of new nonzero location in off-diagonal portion of matrix %g(%D,%D)\n",(double)PetscRealPart(value),im[i],in[j]);
614: } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", im[i], in[j]);
615: }
616: } else col = in[j];
617: nonew = b->nonew;
618: MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
619: }
620: }
621: } else {
622: if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
623: if (!aij->donotstash) {
624: mat->assembled = PETSC_FALSE;
625: if (roworiented) {
626: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
627: } else {
628: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
629: }
630: }
631: }
632: }
633: return(0);
634: }
636: /*
637: This function sets the j and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
638: The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
639: No off-process parts of the matrix are allowed here, and mat->was_assembled has to be PETSC_FALSE.
640: */
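/*
   Illustrative example (numbers chosen purely for illustration): if this process owns
   columns [cstart,cend) = [3,7) and one row of the input CSR arrays has
   mat_j = {1, 4, 6, 9}, the loop below stores the local columns {1, 3} (that is, 4-3
   and 6-3) in the diagonal part aj with dnz = 2, and the global columns {1, 9} in the
   off-diagonal part bj with onz = 2.
*/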
641: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[])
642: {
643: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
644: Mat A = aij->A; /* diagonal part of the matrix */
645: Mat B = aij->B; /* offdiagonal part of the matrix */
646: Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
647: Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
648: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,col;
649: PetscInt *ailen = a->ilen,*aj = a->j;
650: PetscInt *bilen = b->ilen,*bj = b->j;
651: PetscInt am = aij->A->rmap->n,j;
652: PetscInt diag_so_far = 0,dnz;
653: PetscInt offd_so_far = 0,onz;
656: /* Iterate over all rows of the matrix */
657: for (j=0; j<am; j++) {
658: dnz = onz = 0;
659: /* Iterate over all non-zero columns of the current row */
660: for (col=mat_i[j]; col<mat_i[j+1]; col++) {
661: /* If column is in the diagonal */
662: if (mat_j[col] >= cstart && mat_j[col] < cend) {
663: aj[diag_so_far++] = mat_j[col] - cstart;
664: dnz++;
665: } else { /* off-diagonal entries */
666: bj[offd_so_far++] = mat_j[col];
667: onz++;
668: }
669: }
670: ailen[j] = dnz;
671: bilen[j] = onz;
672: }
673: return(0);
674: }
676: /*
677: This function sets the local j, a and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
678: The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
679: No off-process parts of the matrix are allowed here; they are set at a later point by MatSetValues_MPIAIJ().
680: Also, mat->was_assembled has to be false, otherwise the assignment aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
681: would not be valid and the more complex MatSetValues_MPIAIJ() has to be used.
682: */
683: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[],const PetscScalar mat_a[])
684: {
685: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
686: Mat A = aij->A; /* diagonal part of the matrix */
687: Mat B = aij->B; /* offdiagonal part of the matrix */
688: Mat_SeqAIJ *aijd =(Mat_SeqAIJ*)(aij->A)->data,*aijo=(Mat_SeqAIJ*)(aij->B)->data;
689: Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
690: Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
691: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend;
692: PetscInt *ailen = a->ilen,*aj = a->j;
693: PetscInt *bilen = b->ilen,*bj = b->j;
694: PetscInt am = aij->A->rmap->n,j;
695: PetscInt *full_diag_i=aijd->i,*full_offd_i=aijo->i; /* These variables can also include non-local elements, which are set at a later point. */
696: PetscInt col,dnz_row,onz_row,rowstart_diag,rowstart_offd;
697: PetscScalar *aa = a->a,*ba = b->a;
700: /* Iterate over all rows of the matrix */
701: for (j=0; j<am; j++) {
702: dnz_row = onz_row = 0;
703: rowstart_offd = full_offd_i[j];
704: rowstart_diag = full_diag_i[j];
705: /* Iterate over all non-zero columns of the current row */
706: for (col=mat_i[j]; col<mat_i[j+1]; col++) {
707: /* If column is in the diagonal */
708: if (mat_j[col] >= cstart && mat_j[col] < cend) {
709: aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
710: aa[rowstart_diag+dnz_row] = mat_a[col];
711: dnz_row++;
712: } else { /* off-diagonal entries */
713: bj[rowstart_offd+onz_row] = mat_j[col];
714: ba[rowstart_offd+onz_row] = mat_a[col];
715: onz_row++;
716: }
717: }
718: ailen[j] = dnz_row;
719: bilen[j] = onz_row;
720: }
721: return(0);
722: }
724: PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
725: {
726: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
728: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
729: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
732: for (i=0; i<m; i++) {
733: if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
734: if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
735: if (idxm[i] >= rstart && idxm[i] < rend) {
736: row = idxm[i] - rstart;
737: for (j=0; j<n; j++) {
738: if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
739: if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
740: if (idxn[j] >= cstart && idxn[j] < cend) {
741: col = idxn[j] - cstart;
742: MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);
743: } else {
744: if (!aij->colmap) {
745: MatCreateColmap_MPIAIJ_Private(mat);
746: }
747: #if defined(PETSC_USE_CTABLE)
748: PetscTableFind(aij->colmap,idxn[j]+1,&col);
749: col--;
750: #else
751: col = aij->colmap[idxn[j]] - 1;
752: #endif
753: if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
754: else {
755: MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);
756: }
757: }
758: }
759: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
760: }
761: return(0);
762: }
764: extern PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat,Vec,Vec);
766: PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
767: {
768: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
770: PetscInt nstash,reallocs;
773: if (aij->donotstash || mat->nooffprocentries) return(0);
775: MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
776: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
777: PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
778: return(0);
779: }
781: PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
782: {
783: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
784: Mat_SeqAIJ *a = (Mat_SeqAIJ*)aij->A->data;
786: PetscMPIInt n;
787: PetscInt i,j,rstart,ncols,flg;
788: PetscInt *row,*col;
789: PetscBool other_disassembled;
790: PetscScalar *val;
792: /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */
795: if (!aij->donotstash && !mat->nooffprocentries) {
796: while (1) {
797: MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
798: if (!flg) break;
800: for (i=0; i<n; ) {
801: /* Now identify the consecutive vals belonging to the same row */
802: for (j=i,rstart=row[j]; j<n; j++) {
803: if (row[j] != rstart) break;
804: }
805: if (j < n) ncols = j-i;
806: else ncols = n-i;
807: /* Now assemble all these values with a single function call */
808: MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);
810: i = j;
811: }
812: }
813: MatStashScatterEnd_Private(&mat->stash);
814: }
815: MatAssemblyBegin(aij->A,mode);
816: MatAssemblyEnd(aij->A,mode);
818: /* determine if any processor has disassembled; if so, we must
819: also disassemble ourselves, so that we may reassemble. */
820: /*
821: if the nonzero structure of submatrix B cannot change, then we know that
822: no processor disassembled, so we can skip this step
823: */
824: if (!((Mat_SeqAIJ*)aij->B->data)->nonew) {
825: MPIU_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));
826: if (mat->was_assembled && !other_disassembled) {
827: MatDisAssemble_MPIAIJ(mat);
828: }
829: }
830: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
831: MatSetUpMultiply_MPIAIJ(mat);
832: }
833: MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);
834: MatAssemblyBegin(aij->B,mode);
835: MatAssemblyEnd(aij->B,mode);
837: PetscFree2(aij->rowvalues,aij->rowindices);
839: aij->rowvalues = 0;
841: VecDestroy(&aij->diag);
842: if (a->inode.size) mat->ops->multdiagonalblock = MatMultDiagonalBlock_MPIAIJ;
844: /* if no new nonzero locations are allowed in matrix then only set the matrix state the first time through */
845: if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
846: PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
847: MPIU_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));
848: }
849: return(0);
850: }
852: PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
853: {
854: Mat_MPIAIJ *l = (Mat_MPIAIJ*)A->data;
858: MatZeroEntries(l->A);
859: MatZeroEntries(l->B);
860: return(0);
861: }
863: PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
864: {
865: Mat_MPIAIJ *mat = (Mat_MPIAIJ *) A->data;
866: PetscInt *lrows;
867: PetscInt r, len;
868: PetscBool cong;
872: /* get locally owned rows */
873: MatZeroRowsMapLocal_Private(A,N,rows,&len,&lrows);
874: /* fix right hand side if needed */
875: if (x && b) {
876: const PetscScalar *xx;
877: PetscScalar *bb;
879: VecGetArrayRead(x, &xx);
880: VecGetArray(b, &bb);
881: for (r = 0; r < len; ++r) bb[lrows[r]] = diag*xx[lrows[r]];
882: VecRestoreArrayRead(x, &xx);
883: VecRestoreArray(b, &bb);
884: }
885: /* Must zero mat->B before mat->A because the (diag != 0.0) case below may put values into mat->B */
886: MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
887: MatHasCongruentLayouts(A,&cong);
888: if ((diag != 0.0) && cong) {
889: MatZeroRows(mat->A, len, lrows, diag, NULL, NULL);
890: } else if (diag != 0.0) {
891: MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
892: if (((Mat_SeqAIJ *) mat->A->data)->nonew) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MatZeroRows() on rectangular matrices cannot be used with the Mat options\nMAT_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
893: for (r = 0; r < len; ++r) {
894: const PetscInt row = lrows[r] + A->rmap->rstart;
895: MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES);
896: }
897: MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
898: MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
899: } else {
900: MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
901: }
902: PetscFree(lrows);
904: /* only change matrix nonzero state if pattern was allowed to be changed */
905: if (!((Mat_SeqAIJ*)(mat->A->data))->keepnonzeropattern) {
906: PetscObjectState state = mat->A->nonzerostate + mat->B->nonzerostate;
907: MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
908: }
909: return(0);
910: }
912: PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
913: {
914: Mat_MPIAIJ *l = (Mat_MPIAIJ*)A->data;
915: PetscErrorCode ierr;
916: PetscMPIInt n = A->rmap->n;
917: PetscInt i,j,r,m,p = 0,len = 0;
918: PetscInt *lrows,*owners = A->rmap->range;
919: PetscSFNode *rrows;
920: PetscSF sf;
921: const PetscScalar *xx;
922: PetscScalar *bb,*mask;
923: Vec xmask,lmask;
924: Mat_SeqAIJ *aij = (Mat_SeqAIJ*)l->B->data;
925: const PetscInt *aj, *ii,*ridx;
926: PetscScalar *aa;
929: /* Create SF where leaves are input rows and roots are owned rows */
930: PetscMalloc1(n, &lrows);
931: for (r = 0; r < n; ++r) lrows[r] = -1;
932: PetscMalloc1(N, &rrows);
933: for (r = 0; r < N; ++r) {
934: const PetscInt idx = rows[r];
935: if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
936: if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
937: PetscLayoutFindOwner(A->rmap,idx,&p);
938: }
939: rrows[r].rank = p;
940: rrows[r].index = rows[r] - owners[p];
941: }
942: PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);
943: PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);
944: /* Collect flags for rows to be zeroed */
945: PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
946: PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
947: PetscSFDestroy(&sf);
948: /* Compress and put in row numbers */
949: for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
950: /* zero diagonal part of matrix */
951: MatZeroRowsColumns(l->A,len,lrows,diag,x,b);
952: /* handle off diagonal part of matrix */
953: MatCreateVecs(A,&xmask,NULL);
954: VecDuplicate(l->lvec,&lmask);
955: VecGetArray(xmask,&bb);
956: for (i=0; i<len; i++) bb[lrows[i]] = 1;
957: VecRestoreArray(xmask,&bb);
958: VecScatterBegin(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
959: VecScatterEnd(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
960: VecDestroy(&xmask);
961: if (x) {
962: VecScatterBegin(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
963: VecScatterEnd(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
964: VecGetArrayRead(l->lvec,&xx);
965: VecGetArray(b,&bb);
966: }
967: VecGetArray(lmask,&mask);
968: /* remove zeroed rows of off diagonal matrix */
969: ii = aij->i;
970: for (i=0; i<len; i++) {
971: PetscMemzero(aij->a + ii[lrows[i]],(ii[lrows[i]+1] - ii[lrows[i]])*sizeof(PetscScalar));
972: }
973: /* loop over all elements of the off-process part of the matrix, zeroing removed columns */
974: if (aij->compressedrow.use) {
975: m = aij->compressedrow.nrows;
976: ii = aij->compressedrow.i;
977: ridx = aij->compressedrow.rindex;
978: for (i=0; i<m; i++) {
979: n = ii[i+1] - ii[i];
980: aj = aij->j + ii[i];
981: aa = aij->a + ii[i];
983: for (j=0; j<n; j++) {
984: if (PetscAbsScalar(mask[*aj])) {
985: if (b) bb[*ridx] -= *aa*xx[*aj];
986: *aa = 0.0;
987: }
988: aa++;
989: aj++;
990: }
991: ridx++;
992: }
993: } else { /* do not use compressed row format */
994: m = l->B->rmap->n;
995: for (i=0; i<m; i++) {
996: n = ii[i+1] - ii[i];
997: aj = aij->j + ii[i];
998: aa = aij->a + ii[i];
999: for (j=0; j<n; j++) {
1000: if (PetscAbsScalar(mask[*aj])) {
1001: if (b) bb[i] -= *aa*xx[*aj];
1002: *aa = 0.0;
1003: }
1004: aa++;
1005: aj++;
1006: }
1007: }
1008: }
1009: if (x) {
1010: VecRestoreArray(b,&bb);
1011: VecRestoreArrayRead(l->lvec,&xx);
1012: }
1013: VecRestoreArray(lmask,&mask);
1014: VecDestroy(&lmask);
1015: PetscFree(lrows);
1017: /* only change matrix nonzero state if pattern was allowed to be changed */
1018: if (!((Mat_SeqAIJ*)(l->A->data))->keepnonzeropattern) {
1019: PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
1020: MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
1021: }
1022: return(0);
1023: }
1025: PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
1026: {
1027: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1029: PetscInt nt;
1030: VecScatter Mvctx = a->Mvctx;
1033: VecGetLocalSize(xx,&nt);
1034: if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
1036: VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1037: (*a->A->ops->mult)(a->A,xx,yy);
1038: VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1039: (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
1040: return(0);
1041: }
1043: PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A,Vec bb,Vec xx)
1044: {
1045: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1049: MatMultDiagonalBlock(a->A,bb,xx);
1050: return(0);
1051: }
1053: PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1054: {
1055: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1057: VecScatter Mvctx = a->Mvctx;
1060: if (a->Mvctx_mpi1_flg) Mvctx = a->Mvctx_mpi1;
1061: VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1062: (*a->A->ops->multadd)(a->A,xx,yy,zz);
1063: VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1064: (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
1065: return(0);
1066: }
1068: PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
1069: {
1070: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1074: /* do nondiagonal part */
1075: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1076: /* do local part */
1077: (*a->A->ops->multtranspose)(a->A,xx,yy);
1078: /* add partial results together */
1079: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1080: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1081: return(0);
1082: }
1084: PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscBool *f)
1085: {
1086: MPI_Comm comm;
1087: Mat_MPIAIJ *Aij = (Mat_MPIAIJ*) Amat->data, *Bij;
1088: Mat Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
1089: IS Me,Notme;
1091: PetscInt M,N,first,last,*notme,i;
1092: PetscBool lf;
1093: PetscMPIInt size;
1096: /* Easy test: symmetric diagonal block */
1097: Bij = (Mat_MPIAIJ*) Bmat->data; Bdia = Bij->A;
1098: MatIsTranspose(Adia,Bdia,tol,&lf);
1099: MPIU_Allreduce(&lf,f,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)Amat));
1100: if (!*f) return(0);
1101: PetscObjectGetComm((PetscObject)Amat,&comm);
1102: MPI_Comm_size(comm,&size);
1103: if (size == 1) return(0);
1105: /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */
1106: MatGetSize(Amat,&M,&N);
1107: MatGetOwnershipRange(Amat,&first,&last);
1108: PetscMalloc1(N-last+first,&notme);
1109: for (i=0; i<first; i++) notme[i] = i;
1110: for (i=last; i<M; i++) notme[i-last+first] = i;
1111: ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,PETSC_COPY_VALUES,&Notme);
1112: ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);
1113: MatCreateSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);
1114: Aoff = Aoffs[0];
1115: MatCreateSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);
1116: Boff = Boffs[0];
1117: MatIsTranspose(Aoff,Boff,tol,f);
1118: MatDestroyMatrices(1,&Aoffs);
1119: MatDestroyMatrices(1,&Boffs);
1120: ISDestroy(&Me);
1121: ISDestroy(&Notme);
1122: PetscFree(notme);
1123: return(0);
1124: }
1126: PetscErrorCode MatIsSymmetric_MPIAIJ(Mat A,PetscReal tol,PetscBool *f)
1127: {
1131: MatIsTranspose_MPIAIJ(A,A,tol,f);
1132: return(0);
1133: }
1135: PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1136: {
1137: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1141: /* do nondiagonal part */
1142: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1143: /* do local part */
1144: (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
1145: /* add partial results together */
1146: VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1147: VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1148: return(0);
1149: }
1151: /*
1152: This only works correctly for square matrices where the subblock A->A is the
1153: diagonal block
1154: */
1155: PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
1156: {
1158: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1161: if (A->rmap->N != A->cmap->N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
1162: if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
1163: MatGetDiagonal(a->A,v);
1164: return(0);
1165: }
1167: PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
1168: {
1169: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1173: MatScale(a->A,aa);
1174: MatScale(a->B,aa);
1175: return(0);
1176: }
1178: PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
1179: {
1180: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1184: #if defined(PETSC_USE_LOG)
1185: PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
1186: #endif
1187: MatStashDestroy_Private(&mat->stash);
1188: VecDestroy(&aij->diag);
1189: MatDestroy(&aij->A);
1190: MatDestroy(&aij->B);
1191: #if defined(PETSC_USE_CTABLE)
1192: PetscTableDestroy(&aij->colmap);
1193: #else
1194: PetscFree(aij->colmap);
1195: #endif
1196: PetscFree(aij->garray);
1197: VecDestroy(&aij->lvec);
1198: VecScatterDestroy(&aij->Mvctx);
1199: if (aij->Mvctx_mpi1) {VecScatterDestroy(&aij->Mvctx_mpi1);}
1200: PetscFree2(aij->rowvalues,aij->rowindices);
1201: PetscFree(aij->ld);
1202: PetscFree(mat->data);
1204: PetscObjectChangeTypeName((PetscObject)mat,0);
1205: PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);
1206: PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);
1207: PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C",NULL);
1208: PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C",NULL);
1209: PetscObjectComposeFunction((PetscObject)mat,"MatResetPreallocation_C",NULL);
1210: PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C",NULL);
1211: PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);
1212: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisbaij_C",NULL);
1213: #if defined(PETSC_HAVE_ELEMENTAL)
1214: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_elemental_C",NULL);
1215: #endif
1216: #if defined(PETSC_HAVE_HYPRE)
1217: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_hypre_C",NULL);
1218: PetscObjectComposeFunction((PetscObject)mat,"MatMatMatMult_transpose_mpiaij_mpiaij_C",NULL);
1219: #endif
1220: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_is_C",NULL);
1221: PetscObjectComposeFunction((PetscObject)mat,"MatPtAP_is_mpiaij_C",NULL);
1222: return(0);
1223: }
1225: PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
1226: {
1227: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1228: Mat_SeqAIJ *A = (Mat_SeqAIJ*)aij->A->data;
1229: Mat_SeqAIJ *B = (Mat_SeqAIJ*)aij->B->data;
1231: PetscMPIInt rank,size,tag = ((PetscObject)viewer)->tag;
1232: int fd;
1233: PetscInt nz,header[4],*row_lengths,*range=0,rlen,i;
1234: PetscInt nzmax,*column_indices,j,k,col,*garray = aij->garray,cnt,cstart = mat->cmap->rstart,rnz = 0;
1235: PetscScalar *column_values;
1236: PetscInt message_count,flowcontrolcount;
1237: FILE *file;
1240: MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1241: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
1242: nz = A->nz + B->nz;
1243: PetscViewerBinaryGetDescriptor(viewer,&fd);
1244: if (!rank) {
1245: header[0] = MAT_FILE_CLASSID;
1246: header[1] = mat->rmap->N;
1247: header[2] = mat->cmap->N;
1249: MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1250: PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);
1251: /* get largest number of rows any processor has */
1252: rlen = mat->rmap->n;
1253: range = mat->rmap->range;
1254: for (i=1; i<size; i++) rlen = PetscMax(rlen,range[i+1] - range[i]);
1255: } else {
1256: MPI_Reduce(&nz,0,1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1257: rlen = mat->rmap->n;
1258: }
1260: /* load up the local row counts */
1261: PetscMalloc1(rlen+1,&row_lengths);
1262: for (i=0; i<mat->rmap->n; i++) row_lengths[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];
1264: /* store the row lengths to the file */
1265: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1266: if (!rank) {
1267: PetscBinaryWrite(fd,row_lengths,mat->rmap->n,PETSC_INT,PETSC_TRUE);
1268: for (i=1; i<size; i++) {
1269: PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1270: rlen = range[i+1] - range[i];
1271: MPIULong_Recv(row_lengths,rlen,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));
1272: PetscBinaryWrite(fd,row_lengths,rlen,PETSC_INT,PETSC_TRUE);
1273: }
1274: PetscViewerFlowControlEndMaster(viewer,&message_count);
1275: } else {
1276: PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1277: MPIULong_Send(row_lengths,mat->rmap->n,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1278: PetscViewerFlowControlEndWorker(viewer,&message_count);
1279: }
1280: PetscFree(row_lengths);
1282: /* load up the local column indices */
1283: nzmax = nz; /* process 0 needs as much space as the process with the most nonzeros */
1284: MPI_Reduce(&nz,&nzmax,1,MPIU_INT,MPI_MAX,0,PetscObjectComm((PetscObject)mat));
1285: PetscMalloc1(nzmax+1,&column_indices);
1286: cnt = 0;
1287: for (i=0; i<mat->rmap->n; i++) {
1288: for (j=B->i[i]; j<B->i[i+1]; j++) {
1289: if ((col = garray[B->j[j]]) > cstart) break;
1290: column_indices[cnt++] = col;
1291: }
1292: for (k=A->i[i]; k<A->i[i+1]; k++) column_indices[cnt++] = A->j[k] + cstart;
1293: for (; j<B->i[i+1]; j++) column_indices[cnt++] = garray[B->j[j]];
1294: }
1295: if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);
1297: /* store the column indices to the file */
1298: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1299: if (!rank) {
1300: MPI_Status status;
1301: PetscBinaryWrite(fd,column_indices,nz,PETSC_INT,PETSC_TRUE);
1302: for (i=1; i<size; i++) {
1303: PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1304: MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1305: if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
1306: MPIULong_Recv(column_indices,rnz,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));
1307: PetscBinaryWrite(fd,column_indices,rnz,PETSC_INT,PETSC_TRUE);
1308: }
1309: PetscViewerFlowControlEndMaster(viewer,&message_count);
1310: } else {
1311: PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1312: MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1313: MPIULong_Send(column_indices,nz,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1314: PetscViewerFlowControlEndWorker(viewer,&message_count);
1315: }
1316: PetscFree(column_indices);
1318: /* load up the local column values */
1319: PetscMalloc1(nzmax+1,&column_values);
1320: cnt = 0;
1321: for (i=0; i<mat->rmap->n; i++) {
1322: for (j=B->i[i]; j<B->i[i+1]; j++) {
1323: if (garray[B->j[j]] > cstart) break;
1324: column_values[cnt++] = B->a[j];
1325: }
1326: for (k=A->i[i]; k<A->i[i+1]; k++) column_values[cnt++] = A->a[k];
1327: for (; j<B->i[i+1]; j++) column_values[cnt++] = B->a[j];
1328: }
1329: if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);
1331: /* store the column values to the file */
1332: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1333: if (!rank) {
1334: MPI_Status status;
1335: PetscBinaryWrite(fd,column_values,nz,PETSC_SCALAR,PETSC_TRUE);
1336: for (i=1; i<size; i++) {
1337: PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1338: MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1339: if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
1340: MPIULong_Recv(column_values,rnz,MPIU_SCALAR,i,tag,PetscObjectComm((PetscObject)mat));
1341: PetscBinaryWrite(fd,column_values,rnz,PETSC_SCALAR,PETSC_TRUE);
1342: }
1343: PetscViewerFlowControlEndMaster(viewer,&message_count);
1344: } else {
1345: PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1346: MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1347: MPIULong_Send(column_values,nz,MPIU_SCALAR,0,tag,PetscObjectComm((PetscObject)mat));
1348: PetscViewerFlowControlEndWorker(viewer,&message_count);
1349: }
1350: PetscFree(column_values);
1352: PetscViewerBinaryGetInfoPointer(viewer,&file);
1353: if (file) fprintf(file,"-matload_block_size %d\n",(int)PetscAbs(mat->rmap->bs));
1354: return(0);
1355: }
1357: #include <petscdraw.h>
1358: PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1359: {
1360: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1361: PetscErrorCode ierr;
1362: PetscMPIInt rank = aij->rank,size = aij->size;
1363: PetscBool isdraw,iascii,isbinary;
1364: PetscViewer sviewer;
1365: PetscViewerFormat format;
1368: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1369: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1370: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1371: if (iascii) {
1372: PetscViewerGetFormat(viewer,&format);
1373: if (format == PETSC_VIEWER_LOAD_BALANCE) {
1374: PetscInt i,nmax = 0,nmin = PETSC_MAX_INT,navg = 0,*nz,nzlocal = ((Mat_SeqAIJ*) (aij->A->data))->nz + ((Mat_SeqAIJ*) (aij->B->data))->nz;
1375: PetscMalloc1(size,&nz);
1376: MPI_Allgather(&nzlocal,1,MPIU_INT,nz,1,MPIU_INT,PetscObjectComm((PetscObject)mat));
1377: for (i=0; i<(PetscInt)size; i++) {
1378: nmax = PetscMax(nmax,nz[i]);
1379: nmin = PetscMin(nmin,nz[i]);
1380: navg += nz[i];
1381: }
1382: PetscFree(nz);
1383: navg = navg/size;
1384: PetscViewerASCIIPrintf(viewer,"Load Balance - Nonzeros: Min %D avg %D max %D\n",nmin,navg,nmax);
1385: return(0);
1386: }
1387: PetscViewerGetFormat(viewer,&format);
1388: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1389: MatInfo info;
1390: PetscBool inodes;
1392: MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1393: MatGetInfo(mat,MAT_LOCAL,&info);
1394: MatInodeGetInodeSizes(aij->A,NULL,(PetscInt**)&inodes,NULL);
1395: PetscViewerASCIIPushSynchronized(viewer);
1396: if (!inodes) {
1397: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, not using I-node routines\n",
1398: rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1399: } else {
1400: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, using I-node routines\n",
1401: rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1402: }
1403: MatGetInfo(aij->A,MAT_LOCAL,&info);
1404: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1405: MatGetInfo(aij->B,MAT_LOCAL,&info);
1406: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1407: PetscViewerFlush(viewer);
1408: PetscViewerASCIIPopSynchronized(viewer);
1409: PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
1410: VecScatterView(aij->Mvctx,viewer);
1411: return(0);
1412: } else if (format == PETSC_VIEWER_ASCII_INFO) {
1413: PetscInt inodecount,inodelimit,*inodes;
1414: MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);
1415: if (inodes) {
1416: PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);
1417: } else {
1418: PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");
1419: }
1420: return(0);
1421: } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1422: return(0);
1423: }
1424: } else if (isbinary) {
1425: if (size == 1) {
1426: PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1427: MatView(aij->A,viewer);
1428: } else {
1429: MatView_MPIAIJ_Binary(mat,viewer);
1430: }
1431: return(0);
1432: } else if (isdraw) {
1433: PetscDraw draw;
1434: PetscBool isnull;
1435: PetscViewerDrawGetDraw(viewer,0,&draw);
1436: PetscDrawIsNull(draw,&isnull);
1437: if (isnull) return(0);
1438: }
1440: {
1441: /* assemble the entire matrix onto first processor. */
1442: Mat A;
1443: Mat_SeqAIJ *Aloc;
1444: PetscInt M = mat->rmap->N,N = mat->cmap->N,m,*ai,*aj,row,*cols,i,*ct;
1445: MatScalar *a;
1447: MatCreate(PetscObjectComm((PetscObject)mat),&A);
1448: if (!rank) {
1449: MatSetSizes(A,M,N,M,N);
1450: } else {
1451: MatSetSizes(A,0,0,M,N);
1452: }
1453: /* This is just a temporary matrix, so explicitly using MATMPIAIJ is probably best */
1454: MatSetType(A,MATMPIAIJ);
1455: MatMPIAIJSetPreallocation(A,0,NULL,0,NULL);
1456: MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);
1457: PetscLogObjectParent((PetscObject)mat,(PetscObject)A);
1459: /* copy over the A part */
1460: Aloc = (Mat_SeqAIJ*)aij->A->data;
1461: m = aij->A->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1462: row = mat->rmap->rstart;
1463: for (i=0; i<ai[m]; i++) aj[i] += mat->cmap->rstart;
1464: for (i=0; i<m; i++) {
1465: MatSetValues(A,1,&row,ai[i+1]-ai[i],aj,a,INSERT_VALUES);
1466: row++;
1467: a += ai[i+1]-ai[i]; aj += ai[i+1]-ai[i];
1468: }
1469: aj = Aloc->j;
1470: for (i=0; i<ai[m]; i++) aj[i] -= mat->cmap->rstart;
1472: /* copy over the B part */
1473: Aloc = (Mat_SeqAIJ*)aij->B->data;
1474: m = aij->B->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1475: row = mat->rmap->rstart;
1476: PetscMalloc1(ai[m]+1,&cols);
1477: ct = cols;
1478: for (i=0; i<ai[m]; i++) cols[i] = aij->garray[aj[i]];
1479: for (i=0; i<m; i++) {
1480: MatSetValues(A,1,&row,ai[i+1]-ai[i],cols,a,INSERT_VALUES);
1481: row++;
1482: a += ai[i+1]-ai[i]; cols += ai[i+1]-ai[i];
1483: }
1484: PetscFree(ct);
1485: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
1486: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
1487: /*
1488: Everyone has to participate in drawing the matrix since the graphics waits are
1489: synchronized across all processes that share the PetscDraw object
1490: */
1491: PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1492: if (!rank) {
1493: PetscObjectSetName((PetscObject)((Mat_MPIAIJ*)(A->data))->A,((PetscObject)mat)->name);
1494: MatView_SeqAIJ(((Mat_MPIAIJ*)(A->data))->A,sviewer);
1495: }
1496: PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1497: PetscViewerFlush(viewer);
1498: MatDestroy(&A);
1499: }
1500: return(0);
1501: }
1503: PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1504: {
1506: PetscBool iascii,isdraw,issocket,isbinary;
1509: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1510: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1511: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1512: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
1513: if (iascii || isdraw || isbinary || issocket) {
1514: MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);
1515: }
1516: return(0);
1517: }
1519: PetscErrorCode MatSOR_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1520: {
1521: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1523: Vec bb1 = 0;
1524: PetscBool hasop;
1527: if (flag == SOR_APPLY_UPPER) {
1528: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1529: return(0);
1530: }
1532: if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) {
1533: VecDuplicate(bb,&bb1);
1534: }
1536: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1537: if (flag & SOR_ZERO_INITIAL_GUESS) {
1538: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1539: its--;
1540: }
1542: while (its--) {
1543: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1544: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1546: /* update rhs: bb1 = bb - B*x */
1547: VecScale(mat->lvec,-1.0);
1548: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1550: /* local sweep */
1551: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);
1552: }
1553: } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1554: if (flag & SOR_ZERO_INITIAL_GUESS) {
1555: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1556: its--;
1557: }
1558: while (its--) {
1559: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1560: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1562: /* update rhs: bb1 = bb - B*x */
1563: VecScale(mat->lvec,-1.0);
1564: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1566: /* local sweep */
1567: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);
1568: }
1569: } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1570: if (flag & SOR_ZERO_INITIAL_GUESS) {
1571: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1572: its--;
1573: }
1574: while (its--) {
1575: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1576: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1578: /* update rhs: bb1 = bb - B*x */
1579: VecScale(mat->lvec,-1.0);
1580: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1582: /* local sweep */
1583: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);
1584: }
1585: } else if (flag & SOR_EISENSTAT) {
1586: Vec xx1;
1588: VecDuplicate(bb,&xx1);
1589: (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);
1591: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1592: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1593: if (!mat->diag) {
1594: MatCreateVecs(matin,&mat->diag,NULL);
1595: MatGetDiagonal(matin,mat->diag);
1596: }
1597: MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);
1598: if (hasop) {
1599: MatMultDiagonalBlock(matin,xx,bb1);
1600: } else {
1601: VecPointwiseMult(bb1,mat->diag,xx);
1602: }
1603: VecAYPX(bb1,(omega-2.0)/omega,bb);
1605: MatMultAdd(mat->B,mat->lvec,bb1,bb1);
1607: /* local sweep */
1608: (*mat->A->ops->sor)(mat->A,bb1,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);
1609: VecAXPY(xx,1.0,xx1);
1610: VecDestroy(&xx1);
1611: } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel SOR not supported");
1613: VecDestroy(&bb1);
1615: matin->factorerrortype = mat->A->factorerrortype;
1616: return(0);
1617: }
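/*
   Usage sketch (not part of this file, names A, b, x are assumed): MatSOR_MPIAIJ is normally
   reached through MatSOR(), e.g. from PCSOR. Assuming A is an assembled MATMPIAIJ matrix and
   b, x are conforming parallel vectors, one local symmetric sweep with omega = 1.0 and a zero
   initial guess could look like

     MatSOR(A,b,1.0,(MatSORType)(SOR_LOCAL_SYMMETRIC_SWEEP | SOR_ZERO_INITIAL_GUESS),0.0,1,1,x);

   Only the processor-local (block Jacobi) variants are handled above; a true parallel sweep
   falls through to the "Parallel SOR not supported" error.
*/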
1619: PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1620: {
1621: Mat aA,aB,Aperm;
1622: const PetscInt *rwant,*cwant,*gcols,*ai,*bi,*aj,*bj;
1623: PetscScalar *aa,*ba;
1624: PetscInt i,j,m,n,ng,anz,bnz,*dnnz,*onnz,*tdnnz,*tonnz,*rdest,*cdest,*work,*gcdest;
1625: PetscSF rowsf,sf;
1626: IS parcolp = NULL;
1627: PetscBool done;
1631: MatGetLocalSize(A,&m,&n);
1632: ISGetIndices(rowp,&rwant);
1633: ISGetIndices(colp,&cwant);
1634: PetscMalloc3(PetscMax(m,n),&work,m,&rdest,n,&cdest);
1636: /* Invert row permutation to find out where my rows should go */
1637: PetscSFCreate(PetscObjectComm((PetscObject)A),&rowsf);
1638: PetscSFSetGraphLayout(rowsf,A->rmap,A->rmap->n,NULL,PETSC_OWN_POINTER,rwant);
1639: PetscSFSetFromOptions(rowsf);
1640: for (i=0; i<m; i++) work[i] = A->rmap->rstart + i;
1641: PetscSFReduceBegin(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);
1642: PetscSFReduceEnd(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);
1644: /* Invert column permutation to find out where my columns should go */
1645: PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1646: PetscSFSetGraphLayout(sf,A->cmap,A->cmap->n,NULL,PETSC_OWN_POINTER,cwant);
1647: PetscSFSetFromOptions(sf);
1648: for (i=0; i<n; i++) work[i] = A->cmap->rstart + i;
1649: PetscSFReduceBegin(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1650: PetscSFReduceEnd(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1651: PetscSFDestroy(&sf);
1653: ISRestoreIndices(rowp,&rwant);
1654: ISRestoreIndices(colp,&cwant);
1655: MatMPIAIJGetSeqAIJ(A,&aA,&aB,&gcols);
1657: /* Find out where my gcols should go */
1658: MatGetSize(aB,NULL,&ng);
1659: PetscMalloc1(ng,&gcdest);
1660: PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1661: PetscSFSetGraphLayout(sf,A->cmap,ng,NULL,PETSC_OWN_POINTER,gcols);
1662: PetscSFSetFromOptions(sf);
1663: PetscSFBcastBegin(sf,MPIU_INT,cdest,gcdest);
1664: PetscSFBcastEnd(sf,MPIU_INT,cdest,gcdest);
1665: PetscSFDestroy(&sf);
1667: PetscCalloc4(m,&dnnz,m,&onnz,m,&tdnnz,m,&tonnz);
1668: MatGetRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1669: MatGetRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1670: for (i=0; i<m; i++) {
1671: PetscInt row = rdest[i],rowner;
1672: PetscLayoutFindOwner(A->rmap,row,&rowner);
1673: for (j=ai[i]; j<ai[i+1]; j++) {
1674: PetscInt cowner,col = cdest[aj[j]];
1675: PetscLayoutFindOwner(A->cmap,col,&cowner); /* Could build an index for the columns to eliminate this search */
1676: if (rowner == cowner) dnnz[i]++;
1677: else onnz[i]++;
1678: }
1679: for (j=bi[i]; j<bi[i+1]; j++) {
1680: PetscInt cowner,col = gcdest[bj[j]];
1681: PetscLayoutFindOwner(A->cmap,col,&cowner);
1682: if (rowner == cowner) dnnz[i]++;
1683: else onnz[i]++;
1684: }
1685: }
1686: PetscSFBcastBegin(rowsf,MPIU_INT,dnnz,tdnnz);
1687: PetscSFBcastEnd(rowsf,MPIU_INT,dnnz,tdnnz);
1688: PetscSFBcastBegin(rowsf,MPIU_INT,onnz,tonnz);
1689: PetscSFBcastEnd(rowsf,MPIU_INT,onnz,tonnz);
1690: PetscSFDestroy(&rowsf);
1692: MatCreateAIJ(PetscObjectComm((PetscObject)A),A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N,0,tdnnz,0,tonnz,&Aperm);
1693: MatSeqAIJGetArray(aA,&aa);
1694: MatSeqAIJGetArray(aB,&ba);
1695: for (i=0; i<m; i++) {
1696: PetscInt *acols = dnnz,*bcols = onnz; /* Repurpose now-unneeded arrays */
1697: PetscInt j0,rowlen;
1698: rowlen = ai[i+1] - ai[i];
1699: for (j0=j=0; j<rowlen; j0=j) { /* rowlen could be larger than number of rows m, so sum in batches */
1700: for ( ; j<PetscMin(rowlen,j0+m); j++) acols[j-j0] = cdest[aj[ai[i]+j]];
1701: MatSetValues(Aperm,1,&rdest[i],j-j0,acols,aa+ai[i]+j0,INSERT_VALUES);
1702: }
1703: rowlen = bi[i+1] - bi[i];
1704: for (j0=j=0; j<rowlen; j0=j) {
1705: for ( ; j<PetscMin(rowlen,j0+m); j++) bcols[j-j0] = gcdest[bj[bi[i]+j]];
1706: MatSetValues(Aperm,1,&rdest[i],j-j0,bcols,ba+bi[i]+j0,INSERT_VALUES);
1707: }
1708: }
1709: MatAssemblyBegin(Aperm,MAT_FINAL_ASSEMBLY);
1710: MatAssemblyEnd(Aperm,MAT_FINAL_ASSEMBLY);
1711: MatRestoreRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1712: MatRestoreRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1713: MatSeqAIJRestoreArray(aA,&aa);
1714: MatSeqAIJRestoreArray(aB,&ba);
1715: PetscFree4(dnnz,onnz,tdnnz,tonnz);
1716: PetscFree3(work,rdest,cdest);
1717: PetscFree(gcdest);
1718: if (parcolp) {ISDestroy(&colp);}
1719: *B = Aperm;
1720: return(0);
1721: }
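/*
   Usage sketch (not part of this file; A, rowp, colp, B are assumed names): MatPermute() takes
   parallel row and column permutation index sets. A hypothetical identity permutation built on
   each process's ownership range would be

     PetscInt rstart,rend,cstart,cend;
     IS       rowp,colp;
     Mat      B;
     MatGetOwnershipRange(A,&rstart,&rend);
     MatGetOwnershipRangeColumn(A,&cstart,&cend);
     ISCreateStride(PetscObjectComm((PetscObject)A),rend-rstart,rstart,1,&rowp);
     ISCreateStride(PetscObjectComm((PetscObject)A),cend-cstart,cstart,1,&colp);
     MatPermute(A,rowp,colp,&B);
*/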
1723: PetscErrorCode MatGetGhosts_MPIAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
1724: {
1725: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1729: MatGetSize(aij->B,NULL,nghosts);
1730: if (ghosts) *ghosts = aij->garray;
1731: return(0);
1732: }
1734: PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1735: {
1736: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1737: Mat A = mat->A,B = mat->B;
1739: PetscReal isend[5],irecv[5];
1742: info->block_size = 1.0;
1743: MatGetInfo(A,MAT_LOCAL,info);
1745: isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1746: isend[3] = info->memory; isend[4] = info->mallocs;
1748: MatGetInfo(B,MAT_LOCAL,info);
1750: isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1751: isend[3] += info->memory; isend[4] += info->mallocs;
1752: if (flag == MAT_LOCAL) {
1753: info->nz_used = isend[0];
1754: info->nz_allocated = isend[1];
1755: info->nz_unneeded = isend[2];
1756: info->memory = isend[3];
1757: info->mallocs = isend[4];
1758: } else if (flag == MAT_GLOBAL_MAX) {
1759: MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)matin));
1761: info->nz_used = irecv[0];
1762: info->nz_allocated = irecv[1];
1763: info->nz_unneeded = irecv[2];
1764: info->memory = irecv[3];
1765: info->mallocs = irecv[4];
1766: } else if (flag == MAT_GLOBAL_SUM) {
1767: MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)matin));
1769: info->nz_used = irecv[0];
1770: info->nz_allocated = irecv[1];
1771: info->nz_unneeded = irecv[2];
1772: info->memory = irecv[3];
1773: info->mallocs = irecv[4];
1774: }
1775: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
1776: info->fill_ratio_needed = 0;
1777: info->factor_mallocs = 0;
1778: return(0);
1779: }
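/*
   Usage sketch (not part of this file; A is an assumed assembled MATMPIAIJ matrix): the
   MAT_LOCAL numbers computed above are the sums over the diagonal (A) and off-diagonal (B)
   blocks, and MAT_GLOBAL_SUM / MAT_GLOBAL_MAX reduce them over the communicator, e.g.

     MatInfo info;
     MatGetInfo(A,MAT_GLOBAL_SUM,&info);
     PetscPrintf(PETSC_COMM_WORLD,"nonzeros used %g, allocated %g, mallocs %g\n",
                 info.nz_used,info.nz_allocated,info.mallocs);
*/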
1781: PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscBool flg)
1782: {
1783: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1787: switch (op) {
1788: case MAT_NEW_NONZERO_LOCATIONS:
1789: case MAT_NEW_NONZERO_ALLOCATION_ERR:
1790: case MAT_UNUSED_NONZERO_LOCATION_ERR:
1791: case MAT_KEEP_NONZERO_PATTERN:
1792: case MAT_NEW_NONZERO_LOCATION_ERR:
1793: case MAT_USE_INODES:
1794: case MAT_IGNORE_ZERO_ENTRIES:
1795: MatCheckPreallocated(A,1);
1796: MatSetOption(a->A,op,flg);
1797: MatSetOption(a->B,op,flg);
1798: break;
1799: case MAT_ROW_ORIENTED:
1800: MatCheckPreallocated(A,1);
1801: a->roworiented = flg;
1803: MatSetOption(a->A,op,flg);
1804: MatSetOption(a->B,op,flg);
1805: break;
1806: case MAT_NEW_DIAGONALS:
1807: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1808: break;
1809: case MAT_IGNORE_OFF_PROC_ENTRIES:
1810: a->donotstash = flg;
1811: break;
1812: /* Symmetry flags are handled directly by MatSetOption() and they don't affect preallocation */
1813: case MAT_SPD:
1814: case MAT_SYMMETRIC:
1815: case MAT_STRUCTURALLY_SYMMETRIC:
1816: case MAT_HERMITIAN:
1817: case MAT_SYMMETRY_ETERNAL:
1818: break;
1819: case MAT_SUBMAT_SINGLEIS:
1820: A->submat_singleis = flg;
1821: break;
1822: case MAT_STRUCTURE_ONLY:
1823: /* The option is handled directly by MatSetOption() */
1824: break;
1825: default:
1826: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
1827: }
1828: return(0);
1829: }
1831: PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1832: {
1833: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1834: PetscScalar *vworkA,*vworkB,**pvA,**pvB,*v_p;
1836: PetscInt i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1837: PetscInt nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1838: PetscInt *cmap,*idx_p;
1841: if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
1842: mat->getrowactive = PETSC_TRUE;
1844: if (!mat->rowvalues && (idx || v)) {
1845: /*
1846: allocate enough space to hold information from the longest row.
1847: */
1848: Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1849: PetscInt max = 1,tmp;
1850: for (i=0; i<matin->rmap->n; i++) {
1851: tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1852: if (max < tmp) max = tmp;
1853: }
1854: PetscMalloc2(max,&mat->rowvalues,max,&mat->rowindices);
1855: }
1857: if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Only local rows");
1858: lrow = row - rstart;
1860: pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1861: if (!v) {pvA = 0; pvB = 0;}
1862: if (!idx) {pcA = 0; if (!v) pcB = 0;}
1863: (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1864: (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1865: nztot = nzA + nzB;
1867: cmap = mat->garray;
1868: if (v || idx) {
1869: if (nztot) {
1870: /* Sort by increasing column numbers, assuming A and B already sorted */
1871: PetscInt imark = -1;
1872: if (v) {
1873: *v = v_p = mat->rowvalues;
1874: for (i=0; i<nzB; i++) {
1875: if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1876: else break;
1877: }
1878: imark = i;
1879: for (i=0; i<nzA; i++) v_p[imark+i] = vworkA[i];
1880: for (i=imark; i<nzB; i++) v_p[nzA+i] = vworkB[i];
1881: }
1882: if (idx) {
1883: *idx = idx_p = mat->rowindices;
1884: if (imark > -1) {
1885: for (i=0; i<imark; i++) {
1886: idx_p[i] = cmap[cworkB[i]];
1887: }
1888: } else {
1889: for (i=0; i<nzB; i++) {
1890: if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1891: else break;
1892: }
1893: imark = i;
1894: }
1895: for (i=0; i<nzA; i++) idx_p[imark+i] = cstart + cworkA[i];
1896: for (i=imark; i<nzB; i++) idx_p[nzA+i] = cmap[cworkB[i]];
1897: }
1898: } else {
1899: if (idx) *idx = 0;
1900: if (v) *v = 0;
1901: }
1902: }
1903: *nz = nztot;
1904: (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1905: (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1906: return(0);
1907: }
1909: PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1910: {
1911: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1914: if (!aij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1915: aij->getrowactive = PETSC_FALSE;
1916: return(0);
1917: }
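/*
   Usage sketch (not part of this file; A is an assumed assembled MATMPIAIJ matrix): for this
   type MatGetRow()/MatRestoreRow() may only be used on locally owned rows and the calls must
   be paired; the returned column indices are the merge of the diagonal and off-diagonal parts,
   sorted by global column as done above:

     PetscInt          rstart,rend,row,ncols;
     const PetscInt    *cols;
     const PetscScalar *vals;
     MatGetOwnershipRange(A,&rstart,&rend);
     for (row=rstart; row<rend; row++) {
       MatGetRow(A,row,&ncols,&cols,&vals);
       MatRestoreRow(A,row,&ncols,&cols,&vals);
     }
*/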
1919: PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1920: {
1921: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1922: Mat_SeqAIJ *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1924: PetscInt i,j,cstart = mat->cmap->rstart;
1925: PetscReal sum = 0.0;
1926: MatScalar *v;
1929: if (aij->size == 1) {
1930: MatNorm(aij->A,type,norm);
1931: } else {
1932: if (type == NORM_FROBENIUS) {
1933: v = amat->a;
1934: for (i=0; i<amat->nz; i++) {
1935: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1936: }
1937: v = bmat->a;
1938: for (i=0; i<bmat->nz; i++) {
1939: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1940: }
1941: MPIU_Allreduce(&sum,norm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1942: *norm = PetscSqrtReal(*norm);
1943: PetscLogFlops(2*amat->nz+2*bmat->nz);
1944: } else if (type == NORM_1) { /* max column norm */
1945: PetscReal *tmp,*tmp2;
1946: PetscInt *jj,*garray = aij->garray;
1947: PetscCalloc1(mat->cmap->N+1,&tmp);
1948: PetscMalloc1(mat->cmap->N+1,&tmp2);
1949: *norm = 0.0;
1950: v = amat->a; jj = amat->j;
1951: for (j=0; j<amat->nz; j++) {
1952: tmp[cstart + *jj++] += PetscAbsScalar(*v); v++;
1953: }
1954: v = bmat->a; jj = bmat->j;
1955: for (j=0; j<bmat->nz; j++) {
1956: tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
1957: }
1958: MPIU_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1959: for (j=0; j<mat->cmap->N; j++) {
1960: if (tmp2[j] > *norm) *norm = tmp2[j];
1961: }
1962: PetscFree(tmp);
1963: PetscFree(tmp2);
1964: PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1965: } else if (type == NORM_INFINITY) { /* max row norm */
1966: PetscReal ntemp = 0.0;
1967: for (j=0; j<aij->A->rmap->n; j++) {
1968: v = amat->a + amat->i[j];
1969: sum = 0.0;
1970: for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
1971: sum += PetscAbsScalar(*v); v++;
1972: }
1973: v = bmat->a + bmat->i[j];
1974: for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
1975: sum += PetscAbsScalar(*v); v++;
1976: }
1977: if (sum > ntemp) ntemp = sum;
1978: }
1979: MPIU_Allreduce(&ntemp,norm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));
1980: PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1981: } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for two norm");
1982: }
1983: return(0);
1984: }
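/*
   Usage sketch (not part of this file; A is an assumed assembled MATMPIAIJ matrix): only
   NORM_1, NORM_FROBENIUS and NORM_INFINITY are supported by the routine above; NORM_2 raises
   the "No support for two norm" error. For example:

     PetscReal nrm;
     MatNorm(A,NORM_FROBENIUS,&nrm);
*/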
1986: PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
1987: {
1988: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*b;
1989: Mat_SeqAIJ *Aloc =(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data,*sub_B_diag;
1990: PetscInt M = A->rmap->N,N=A->cmap->N,ma,na,mb,nb,*ai,*aj,*bi,*bj,row,*cols,*cols_tmp,*B_diag_ilen,*B_diag_i,i,ncol,A_diag_ncol;
1992: Mat B,A_diag,*B_diag;
1993: MatScalar *array;
1996: ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n; nb = a->B->cmap->n;
1997: ai = Aloc->i; aj = Aloc->j;
1998: bi = Bloc->i; bj = Bloc->j;
1999: if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
2000: PetscInt *d_nnz,*g_nnz,*o_nnz;
2001: PetscSFNode *oloc;
2002: PETSC_UNUSED PetscSF sf;
2004: PetscMalloc4(na,&d_nnz,na,&o_nnz,nb,&g_nnz,nb,&oloc);
2005: /* compute d_nnz for preallocation */
2006: PetscMemzero(d_nnz,na*sizeof(PetscInt));
2007: for (i=0; i<ai[ma]; i++) {
2008: d_nnz[aj[i]]++;
2009: }
2010: /* compute local off-diagonal contributions */
2011: PetscMemzero(g_nnz,nb*sizeof(PetscInt));
2012: for (i=0; i<bi[ma]; i++) g_nnz[bj[i]]++;
2013: /* map those to global */
2014: PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
2015: PetscSFSetGraphLayout(sf,A->cmap,nb,NULL,PETSC_USE_POINTER,a->garray);
2016: PetscSFSetFromOptions(sf);
2017: PetscMemzero(o_nnz,na*sizeof(PetscInt));
2018: PetscSFReduceBegin(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
2019: PetscSFReduceEnd(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
2020: PetscSFDestroy(&sf);
2022: MatCreate(PetscObjectComm((PetscObject)A),&B);
2023: MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
2024: MatSetBlockSizes(B,PetscAbs(A->cmap->bs),PetscAbs(A->rmap->bs));
2025: MatSetType(B,((PetscObject)A)->type_name);
2026: MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
2027: PetscFree4(d_nnz,o_nnz,g_nnz,oloc);
2028: } else {
2029: B = *matout;
2030: MatSetOption(B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
2031: }
2033: b = (Mat_MPIAIJ*)B->data;
2034: A_diag = a->A;
2035: B_diag = &b->A;
2036: sub_B_diag = (Mat_SeqAIJ*)(*B_diag)->data;
2037: A_diag_ncol = A_diag->cmap->N;
2038: B_diag_ilen = sub_B_diag->ilen;
2039: B_diag_i = sub_B_diag->i;
2041: /* Set ilen for diagonal of B */
2042: for (i=0; i<A_diag_ncol; i++) {
2043: B_diag_ilen[i] = B_diag_i[i+1] - B_diag_i[i];
2044: }
2046:   /* Transpose the diagonal part of the matrix. In contrast to the off-diagonal part, this can be done
2047:      very quickly (i.e., without using MatSetValues()), because all writes are local. */
2048: MatTranspose(A_diag,MAT_REUSE_MATRIX,B_diag);
2050: /* copy over the B part */
2051: PetscCalloc1(bi[mb],&cols);
2052: array = Bloc->a;
2053: row = A->rmap->rstart;
2054: for (i=0; i<bi[mb]; i++) cols[i] = a->garray[bj[i]];
2055: cols_tmp = cols;
2056: for (i=0; i<mb; i++) {
2057: ncol = bi[i+1]-bi[i];
2058: MatSetValues(B,ncol,cols_tmp,1,&row,array,INSERT_VALUES);
2059: row++;
2060: array += ncol; cols_tmp += ncol;
2061: }
2062: PetscFree(cols);
2064: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
2065: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
2066: if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
2067: *matout = B;
2068: } else {
2069: MatHeaderMerge(A,&B);
2070: }
2071: return(0);
2072: }
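/*
   Usage sketch (not part of this file; A and At are assumed names): the diagonal block is
   transposed locally while the off-diagonal entries are shipped with MatSetValues(). The
   public entry point is MatTranspose(); MAT_REUSE_MATRIX refills a matrix obtained earlier
   with MAT_INITIAL_MATRIX after the values of A have changed:

     Mat At;
     MatTranspose(A,MAT_INITIAL_MATRIX,&At);
     MatTranspose(A,MAT_REUSE_MATRIX,&At);
*/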
2074: PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
2075: {
2076: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2077: Mat a = aij->A,b = aij->B;
2079: PetscInt s1,s2,s3;
2082: MatGetLocalSize(mat,&s2,&s3);
2083: if (rr) {
2084: VecGetLocalSize(rr,&s1);
2085: if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
2086: /* Overlap communication with computation. */
2087: VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2088: }
2089: if (ll) {
2090: VecGetLocalSize(ll,&s1);
2091: if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
2092: (*b->ops->diagonalscale)(b,ll,0);
2093: }
2094: /* scale the diagonal block */
2095: (*a->ops->diagonalscale)(a,ll,rr);
2097: if (rr) {
2098: /* Do a scatter end and then right scale the off-diagonal block */
2099: VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2100: (*b->ops->diagonalscale)(b,0,aij->lvec);
2101: }
2102: return(0);
2103: }
2105: PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
2106: {
2107: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2111: MatSetUnfactored(a->A);
2112: return(0);
2113: }
2115: PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscBool *flag)
2116: {
2117: Mat_MPIAIJ *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
2118: Mat a,b,c,d;
2119: PetscBool flg;
2123: a = matA->A; b = matA->B;
2124: c = matB->A; d = matB->B;
2126: MatEqual(a,c,&flg);
2127: if (flg) {
2128: MatEqual(b,d,&flg);
2129: }
2130: MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
2131: return(0);
2132: }
2134: PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
2135: {
2137: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2138: Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data;
2141: /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
2142: if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
2143:     /* because of the column compression in the off-processor part of the matrix a->B,
2144:        the number of columns in a->B and b->B may be different, hence we cannot call
2145:        MatCopy() directly on the two parts. If need be, we could provide a copy more
2146:        efficient than MatCopy_Basic() by first uncompressing the a->B matrices and
2147:        then copying the submatrices */
2148: MatCopy_Basic(A,B,str);
2149: } else {
2150: MatCopy(a->A,b->A,str);
2151: MatCopy(a->B,b->B,str);
2152: }
2153: PetscObjectStateIncrease((PetscObject)B);
2154: return(0);
2155: }
2157: PetscErrorCode MatSetUp_MPIAIJ(Mat A)
2158: {
2162: MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
2163: return(0);
2164: }
2166: /*
2167: Computes the number of nonzeros per row needed for preallocation when X and Y
2168: have different nonzero structure.
2169: */
2170: PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m,const PetscInt *xi,const PetscInt *xj,const PetscInt *xltog,const PetscInt *yi,const PetscInt *yj,const PetscInt *yltog,PetscInt *nnz)
2171: {
2172: PetscInt i,j,k,nzx,nzy;
2175: /* Set the number of nonzeros in the new matrix */
2176: for (i=0; i<m; i++) {
2177: const PetscInt *xjj = xj+xi[i],*yjj = yj+yi[i];
2178: nzx = xi[i+1] - xi[i];
2179: nzy = yi[i+1] - yi[i];
2180: nnz[i] = 0;
2181: for (j=0,k=0; j<nzx; j++) { /* Point in X */
2182: for (; k<nzy && yltog[yjj[k]]<xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
2183: if (k<nzy && yltog[yjj[k]]==xltog[xjj[j]]) k++; /* Skip duplicate */
2184: nnz[i]++;
2185: }
2186: for (; k<nzy; k++) nnz[i]++;
2187: }
2188: return(0);
2189: }
2191: /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */
2192: static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y,const PetscInt *yltog,Mat X,const PetscInt *xltog,PetscInt *nnz)
2193: {
2195: PetscInt m = Y->rmap->N;
2196: Mat_SeqAIJ *x = (Mat_SeqAIJ*)X->data;
2197: Mat_SeqAIJ *y = (Mat_SeqAIJ*)Y->data;
2200: MatAXPYGetPreallocation_MPIX_private(m,x->i,x->j,xltog,y->i,y->j,yltog,nnz);
2201: return(0);
2202: }
2204: PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
2205: {
2207: Mat_MPIAIJ *xx = (Mat_MPIAIJ*)X->data,*yy = (Mat_MPIAIJ*)Y->data;
2208: PetscBLASInt bnz,one=1;
2209: Mat_SeqAIJ *x,*y;
2212: if (str == SAME_NONZERO_PATTERN) {
2213: PetscScalar alpha = a;
2214: x = (Mat_SeqAIJ*)xx->A->data;
2215: PetscBLASIntCast(x->nz,&bnz);
2216: y = (Mat_SeqAIJ*)yy->A->data;
2217: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2218: x = (Mat_SeqAIJ*)xx->B->data;
2219: y = (Mat_SeqAIJ*)yy->B->data;
2220: PetscBLASIntCast(x->nz,&bnz);
2221: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2222: PetscObjectStateIncrease((PetscObject)Y);
2223: } else if (str == SUBSET_NONZERO_PATTERN) { /* nonzeros of X is a subset of Y's */
2224: MatAXPY_Basic(Y,a,X,str);
2225: } else {
2226: Mat B;
2227: PetscInt *nnz_d,*nnz_o;
2228: PetscMalloc1(yy->A->rmap->N,&nnz_d);
2229: PetscMalloc1(yy->B->rmap->N,&nnz_o);
2230: MatCreate(PetscObjectComm((PetscObject)Y),&B);
2231: PetscObjectSetName((PetscObject)B,((PetscObject)Y)->name);
2232: MatSetSizes(B,Y->rmap->n,Y->cmap->n,Y->rmap->N,Y->cmap->N);
2233: MatSetBlockSizesFromMats(B,Y,Y);
2234: MatSetType(B,MATMPIAIJ);
2235: MatAXPYGetPreallocation_SeqAIJ(yy->A,xx->A,nnz_d);
2236: MatAXPYGetPreallocation_MPIAIJ(yy->B,yy->garray,xx->B,xx->garray,nnz_o);
2237: MatMPIAIJSetPreallocation(B,0,nnz_d,0,nnz_o);
2238: MatAXPY_BasicWithPreallocation(B,Y,a,X,str);
2239: MatHeaderReplace(Y,&B);
2240: PetscFree(nnz_d);
2241: PetscFree(nnz_o);
2242: }
2243: return(0);
2244: }
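/*
   Usage sketch (not part of this file; X and Y are assumed assembled MATMPIAIJ matrices):
   MatAXPY() computes Y = Y + a*X. The MatStructure argument selects the path above:
   SAME_NONZERO_PATTERN uses a direct BLAS axpy on both blocks, SUBSET_NONZERO_PATTERN falls
   back to MatAXPY_Basic(), and DIFFERENT_NONZERO_PATTERN preallocates and builds a new matrix.

     MatAXPY(Y,2.0,X,DIFFERENT_NONZERO_PATTERN);
*/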
2246: extern PetscErrorCode MatConjugate_SeqAIJ(Mat);
2248: PetscErrorCode MatConjugate_MPIAIJ(Mat mat)
2249: {
2250: #if defined(PETSC_USE_COMPLEX)
2252: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2255: MatConjugate_SeqAIJ(aij->A);
2256: MatConjugate_SeqAIJ(aij->B);
2257: #else
2259: #endif
2260: return(0);
2261: }
2263: PetscErrorCode MatRealPart_MPIAIJ(Mat A)
2264: {
2265: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2269: MatRealPart(a->A);
2270: MatRealPart(a->B);
2271: return(0);
2272: }
2274: PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
2275: {
2276: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2280: MatImaginaryPart(a->A);
2281: MatImaginaryPart(a->B);
2282: return(0);
2283: }
2285: PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2286: {
2287: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2289: PetscInt i,*idxb = 0;
2290: PetscScalar *va,*vb;
2291: Vec vtmp;
2294: MatGetRowMaxAbs(a->A,v,idx);
2295: VecGetArray(v,&va);
2296: if (idx) {
2297: for (i=0; i<A->rmap->n; i++) {
2298: if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2299: }
2300: }
2302: VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2303: if (idx) {
2304: PetscMalloc1(A->rmap->n,&idxb);
2305: }
2306: MatGetRowMaxAbs(a->B,vtmp,idxb);
2307: VecGetArray(vtmp,&vb);
2309: for (i=0; i<A->rmap->n; i++) {
2310: if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2311: va[i] = vb[i];
2312: if (idx) idx[i] = a->garray[idxb[i]];
2313: }
2314: }
2316: VecRestoreArray(v,&va);
2317: VecRestoreArray(vtmp,&vb);
2318: PetscFree(idxb);
2319: VecDestroy(&vtmp);
2320: return(0);
2321: }
2323: PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2324: {
2325: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2327: PetscInt i,*idxb = 0;
2328: PetscScalar *va,*vb;
2329: Vec vtmp;
2332: MatGetRowMinAbs(a->A,v,idx);
2333: VecGetArray(v,&va);
2334: if (idx) {
2335:     for (i=0; i<A->rmap->n; i++) {
2336: if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2337: }
2338: }
2340: VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2341: if (idx) {
2342: PetscMalloc1(A->rmap->n,&idxb);
2343: }
2344: MatGetRowMinAbs(a->B,vtmp,idxb);
2345: VecGetArray(vtmp,&vb);
2347: for (i=0; i<A->rmap->n; i++) {
2348: if (PetscAbsScalar(va[i]) > PetscAbsScalar(vb[i])) {
2349: va[i] = vb[i];
2350: if (idx) idx[i] = a->garray[idxb[i]];
2351: }
2352: }
2354: VecRestoreArray(v,&va);
2355: VecRestoreArray(vtmp,&vb);
2356: PetscFree(idxb);
2357: VecDestroy(&vtmp);
2358: return(0);
2359: }
2361: PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2362: {
2363: Mat_MPIAIJ *mat = (Mat_MPIAIJ*) A->data;
2364: PetscInt n = A->rmap->n;
2365: PetscInt cstart = A->cmap->rstart;
2366: PetscInt *cmap = mat->garray;
2367: PetscInt *diagIdx, *offdiagIdx;
2368: Vec diagV, offdiagV;
2369: PetscScalar *a, *diagA, *offdiagA;
2370: PetscInt r;
2374: PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2375: VecCreateSeq(PetscObjectComm((PetscObject)A), n, &diagV);
2376: VecCreateSeq(PetscObjectComm((PetscObject)A), n, &offdiagV);
2377: MatGetRowMin(mat->A, diagV, diagIdx);
2378: MatGetRowMin(mat->B, offdiagV, offdiagIdx);
2379: VecGetArray(v, &a);
2380: VecGetArray(diagV, &diagA);
2381: VecGetArray(offdiagV, &offdiagA);
2382: for (r = 0; r < n; ++r) {
2383: if (PetscAbsScalar(diagA[r]) <= PetscAbsScalar(offdiagA[r])) {
2384: a[r] = diagA[r];
2385: idx[r] = cstart + diagIdx[r];
2386: } else {
2387: a[r] = offdiagA[r];
2388: idx[r] = cmap[offdiagIdx[r]];
2389: }
2390: }
2391: VecRestoreArray(v, &a);
2392: VecRestoreArray(diagV, &diagA);
2393: VecRestoreArray(offdiagV, &offdiagA);
2394: VecDestroy(&diagV);
2395: VecDestroy(&offdiagV);
2396: PetscFree2(diagIdx, offdiagIdx);
2397: return(0);
2398: }
2400: PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2401: {
2402: Mat_MPIAIJ *mat = (Mat_MPIAIJ*) A->data;
2403: PetscInt n = A->rmap->n;
2404: PetscInt cstart = A->cmap->rstart;
2405: PetscInt *cmap = mat->garray;
2406: PetscInt *diagIdx, *offdiagIdx;
2407: Vec diagV, offdiagV;
2408: PetscScalar *a, *diagA, *offdiagA;
2409: PetscInt r;
2413: PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2414: VecCreateSeq(PETSC_COMM_SELF, n, &diagV);
2415: VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);
2416: MatGetRowMax(mat->A, diagV, diagIdx);
2417: MatGetRowMax(mat->B, offdiagV, offdiagIdx);
2418: VecGetArray(v, &a);
2419: VecGetArray(diagV, &diagA);
2420: VecGetArray(offdiagV, &offdiagA);
2421: for (r = 0; r < n; ++r) {
2422: if (PetscAbsScalar(diagA[r]) >= PetscAbsScalar(offdiagA[r])) {
2423: a[r] = diagA[r];
2424: idx[r] = cstart + diagIdx[r];
2425: } else {
2426: a[r] = offdiagA[r];
2427: idx[r] = cmap[offdiagIdx[r]];
2428: }
2429: }
2430: VecRestoreArray(v, &a);
2431: VecRestoreArray(diagV, &diagA);
2432: VecRestoreArray(offdiagV, &offdiagA);
2433: VecDestroy(&diagV);
2434: VecDestroy(&offdiagV);
2435: PetscFree2(diagIdx, offdiagIdx);
2436: return(0);
2437: }
2439: PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat,Mat *newmat)
2440: {
2442: Mat *dummy;
2445: MatCreateSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,&dummy);
2446: *newmat = *dummy;
2447: PetscFree(dummy);
2448: return(0);
2449: }
2451: PetscErrorCode MatInvertBlockDiagonal_MPIAIJ(Mat A,const PetscScalar **values)
2452: {
2453: Mat_MPIAIJ *a = (Mat_MPIAIJ*) A->data;
2457: MatInvertBlockDiagonal(a->A,values);
2458: A->factorerrortype = a->A->factorerrortype;
2459: return(0);
2460: }
2462: static PetscErrorCode MatSetRandom_MPIAIJ(Mat x,PetscRandom rctx)
2463: {
2465: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)x->data;
2468: MatSetRandom(aij->A,rctx);
2469: MatSetRandom(aij->B,rctx);
2470: MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);
2471: MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);
2472: return(0);
2473: }
2475: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A,PetscBool sc)
2476: {
2478: if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable;
2479: else A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ;
2480: return(0);
2481: }
2483: /*@
2484:    MatMPIAIJSetUseScalableIncreaseOverlap - Specify whether the matrix uses a scalable algorithm to compute the overlap
2486: Collective on Mat
2488: Input Parameters:
2489: + A - the matrix
2490: - sc - PETSC_TRUE indicates use the scalable algorithm (default is not to use the scalable algorithm)
2492: Level: advanced
2494: @*/
2495: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A,PetscBool sc)
2496: {
2497: PetscErrorCode ierr;
2500: PetscTryMethod(A,"MatMPIAIJSetUseScalableIncreaseOverlap_C",(Mat,PetscBool),(A,sc));
2501: return(0);
2502: }
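/*
   Usage sketch (not part of this file; A, rstart and the overlap sizes are assumed): select
   the scalable overlap algorithm before calling MatIncreaseOverlap(), either programmatically
   as below or with the -mat_increase_overlap_scalable option registered in
   MatSetFromOptions_MPIAIJ():

     IS is;
     ISCreateStride(PETSC_COMM_SELF,10,rstart,1,&is);
     MatMPIAIJSetUseScalableIncreaseOverlap(A,PETSC_TRUE);
     MatIncreaseOverlap(A,1,&is,2);
     ISDestroy(&is);
*/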
2504: PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems *PetscOptionsObject,Mat A)
2505: {
2506: PetscErrorCode ierr;
2507: PetscBool sc = PETSC_FALSE,flg;
2510: PetscOptionsHead(PetscOptionsObject,"MPIAIJ options");
2511: if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE;
2512: PetscOptionsBool("-mat_increase_overlap_scalable","Use a scalable algorithm to compute the overlap","MatIncreaseOverlap",sc,&sc,&flg);
2513: if (flg) {
2514: MatMPIAIJSetUseScalableIncreaseOverlap(A,sc);
2515: }
2516: PetscOptionsTail();
2517: return(0);
2518: }
2520: PetscErrorCode MatShift_MPIAIJ(Mat Y,PetscScalar a)
2521: {
2523: Mat_MPIAIJ *maij = (Mat_MPIAIJ*)Y->data;
2524: Mat_SeqAIJ *aij = (Mat_SeqAIJ*)maij->A->data;
2527: if (!Y->preallocated) {
2528: MatMPIAIJSetPreallocation(Y,1,NULL,0,NULL);
2529: } else if (!aij->nz) {
2530: PetscInt nonew = aij->nonew;
2531: MatSeqAIJSetPreallocation(maij->A,1,NULL);
2532: aij->nonew = nonew;
2533: }
2534: MatShift_Basic(Y,a);
2535: return(0);
2536: }
2538: PetscErrorCode MatMissingDiagonal_MPIAIJ(Mat A,PetscBool *missing,PetscInt *d)
2539: {
2540: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2544: if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only works for square matrices");
2545: MatMissingDiagonal(a->A,missing,d);
2546: if (d) {
2547: PetscInt rstart;
2548: MatGetOwnershipRange(A,&rstart,NULL);
2549: *d += rstart;
2551: }
2552: return(0);
2553: }
2555: PetscErrorCode MatInvertVariableBlockDiagonal_MPIAIJ(Mat A,PetscInt nblocks,const PetscInt *bsizes,PetscScalar *diag)
2556: {
2557: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2561: MatInvertVariableBlockDiagonal(a->A,nblocks,bsizes,diag);
2562: return(0);
2563: }
2565: /* -------------------------------------------------------------------*/
2566: static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2567: MatGetRow_MPIAIJ,
2568: MatRestoreRow_MPIAIJ,
2569: MatMult_MPIAIJ,
2570: /* 4*/ MatMultAdd_MPIAIJ,
2571: MatMultTranspose_MPIAIJ,
2572: MatMultTransposeAdd_MPIAIJ,
2573: 0,
2574: 0,
2575: 0,
2576: /*10*/ 0,
2577: 0,
2578: 0,
2579: MatSOR_MPIAIJ,
2580: MatTranspose_MPIAIJ,
2581: /*15*/ MatGetInfo_MPIAIJ,
2582: MatEqual_MPIAIJ,
2583: MatGetDiagonal_MPIAIJ,
2584: MatDiagonalScale_MPIAIJ,
2585: MatNorm_MPIAIJ,
2586: /*20*/ MatAssemblyBegin_MPIAIJ,
2587: MatAssemblyEnd_MPIAIJ,
2588: MatSetOption_MPIAIJ,
2589: MatZeroEntries_MPIAIJ,
2590: /*24*/ MatZeroRows_MPIAIJ,
2591: 0,
2592: 0,
2593: 0,
2594: 0,
2595: /*29*/ MatSetUp_MPIAIJ,
2596: 0,
2597: 0,
2598: MatGetDiagonalBlock_MPIAIJ,
2599: 0,
2600: /*34*/ MatDuplicate_MPIAIJ,
2601: 0,
2602: 0,
2603: 0,
2604: 0,
2605: /*39*/ MatAXPY_MPIAIJ,
2606: MatCreateSubMatrices_MPIAIJ,
2607: MatIncreaseOverlap_MPIAIJ,
2608: MatGetValues_MPIAIJ,
2609: MatCopy_MPIAIJ,
2610: /*44*/ MatGetRowMax_MPIAIJ,
2611: MatScale_MPIAIJ,
2612: MatShift_MPIAIJ,
2613: MatDiagonalSet_MPIAIJ,
2614: MatZeroRowsColumns_MPIAIJ,
2615: /*49*/ MatSetRandom_MPIAIJ,
2616: 0,
2617: 0,
2618: 0,
2619: 0,
2620: /*54*/ MatFDColoringCreate_MPIXAIJ,
2621: 0,
2622: MatSetUnfactored_MPIAIJ,
2623: MatPermute_MPIAIJ,
2624: 0,
2625: /*59*/ MatCreateSubMatrix_MPIAIJ,
2626: MatDestroy_MPIAIJ,
2627: MatView_MPIAIJ,
2628: 0,
2629: MatMatMatMult_MPIAIJ_MPIAIJ_MPIAIJ,
2630: /*64*/ MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ,
2631: MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
2632: 0,
2633: 0,
2634: 0,
2635: /*69*/ MatGetRowMaxAbs_MPIAIJ,
2636: MatGetRowMinAbs_MPIAIJ,
2637: 0,
2638: 0,
2639: 0,
2640: 0,
2641: /*75*/ MatFDColoringApply_AIJ,
2642: MatSetFromOptions_MPIAIJ,
2643: 0,
2644: 0,
2645: MatFindZeroDiagonals_MPIAIJ,
2646: /*80*/ 0,
2647: 0,
2648: 0,
2649: /*83*/ MatLoad_MPIAIJ,
2650: MatIsSymmetric_MPIAIJ,
2651: 0,
2652: 0,
2653: 0,
2654: 0,
2655: /*89*/ MatMatMult_MPIAIJ_MPIAIJ,
2656: MatMatMultSymbolic_MPIAIJ_MPIAIJ,
2657: MatMatMultNumeric_MPIAIJ_MPIAIJ,
2658: MatPtAP_MPIAIJ_MPIAIJ,
2659: MatPtAPSymbolic_MPIAIJ_MPIAIJ,
2660: /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ,
2661: 0,
2662: 0,
2663: 0,
2664: 0,
2665: /*99*/ 0,
2666: 0,
2667: 0,
2668: MatConjugate_MPIAIJ,
2669: 0,
2670: /*104*/MatSetValuesRow_MPIAIJ,
2671: MatRealPart_MPIAIJ,
2672: MatImaginaryPart_MPIAIJ,
2673: 0,
2674: 0,
2675: /*109*/0,
2676: 0,
2677: MatGetRowMin_MPIAIJ,
2678: 0,
2679: MatMissingDiagonal_MPIAIJ,
2680: /*114*/MatGetSeqNonzeroStructure_MPIAIJ,
2681: 0,
2682: MatGetGhosts_MPIAIJ,
2683: 0,
2684: 0,
2685: /*119*/0,
2686: 0,
2687: 0,
2688: 0,
2689: MatGetMultiProcBlock_MPIAIJ,
2690: /*124*/MatFindNonzeroRows_MPIAIJ,
2691: MatGetColumnNorms_MPIAIJ,
2692: MatInvertBlockDiagonal_MPIAIJ,
2693: MatInvertVariableBlockDiagonal_MPIAIJ,
2694: MatCreateSubMatricesMPI_MPIAIJ,
2695: /*129*/0,
2696: MatTransposeMatMult_MPIAIJ_MPIAIJ,
2697: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ,
2698: MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
2699: 0,
2700: /*134*/0,
2701: 0,
2702: MatRARt_MPIAIJ_MPIAIJ,
2703: 0,
2704: 0,
2705: /*139*/MatSetBlockSizes_MPIAIJ,
2706: 0,
2707: 0,
2708: MatFDColoringSetUp_MPIXAIJ,
2709: MatFindOffBlockDiagonalEntries_MPIAIJ,
2710: /*144*/MatCreateMPIMatConcatenateSeqMat_MPIAIJ
2711: };
2713: /* ----------------------------------------------------------------------------------------*/
2715: PetscErrorCode MatStoreValues_MPIAIJ(Mat mat)
2716: {
2717: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2721: MatStoreValues(aij->A);
2722: MatStoreValues(aij->B);
2723: return(0);
2724: }
2726: PetscErrorCode MatRetrieveValues_MPIAIJ(Mat mat)
2727: {
2728: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2732: MatRetrieveValues(aij->A);
2733: MatRetrieveValues(aij->B);
2734: return(0);
2735: }
2737: PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2738: {
2739: Mat_MPIAIJ *b;
2743: PetscLayoutSetUp(B->rmap);
2744: PetscLayoutSetUp(B->cmap);
2745: b = (Mat_MPIAIJ*)B->data;
2747: #if defined(PETSC_USE_CTABLE)
2748: PetscTableDestroy(&b->colmap);
2749: #else
2750: PetscFree(b->colmap);
2751: #endif
2752: PetscFree(b->garray);
2753: VecDestroy(&b->lvec);
2754: VecScatterDestroy(&b->Mvctx);
2756:   /* Because b->B will have been resized we simply destroy it and create a new one each time */
2757: MatDestroy(&b->B);
2758: MatCreate(PETSC_COMM_SELF,&b->B);
2759: MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);
2760: MatSetBlockSizesFromMats(b->B,B,B);
2761: MatSetType(b->B,MATSEQAIJ);
2762: PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);
2764: if (!B->preallocated) {
2765: MatCreate(PETSC_COMM_SELF,&b->A);
2766: MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
2767: MatSetBlockSizesFromMats(b->A,B,B);
2768: MatSetType(b->A,MATSEQAIJ);
2769: PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
2770: }
2772: MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
2773: MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
2774: B->preallocated = PETSC_TRUE;
2775: B->was_assembled = PETSC_FALSE;
2776: B->assembled = PETSC_FALSE;
2777: return(0);
2778: }
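/*
   Usage sketch (not part of this file; M, N are assumed global sizes): d_nz/d_nnz describe the
   diagonal block (columns inside this process's ownership range) and o_nz/o_nnz the
   off-diagonal block. A rough per-row upper bound is enough to avoid mallocs during assembly,
   e.g. for a 5-point stencil:

     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
     MatSetType(A,MATMPIAIJ);
     MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);
*/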
2780: PetscErrorCode MatResetPreallocation_MPIAIJ(Mat B)
2781: {
2782: Mat_MPIAIJ *b;
2787: PetscLayoutSetUp(B->rmap);
2788: PetscLayoutSetUp(B->cmap);
2789: b = (Mat_MPIAIJ*)B->data;
2791: #if defined(PETSC_USE_CTABLE)
2792: PetscTableDestroy(&b->colmap);
2793: #else
2794: PetscFree(b->colmap);
2795: #endif
2796: PetscFree(b->garray);
2797: VecDestroy(&b->lvec);
2798: VecScatterDestroy(&b->Mvctx);
2800: MatResetPreallocation(b->A);
2801: MatResetPreallocation(b->B);
2802: B->preallocated = PETSC_TRUE;
2803: B->was_assembled = PETSC_FALSE;
2804: B->assembled = PETSC_FALSE;
2805: return(0);
2806: }
2808: PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2809: {
2810: Mat mat;
2811: Mat_MPIAIJ *a,*oldmat = (Mat_MPIAIJ*)matin->data;
2815: *newmat = 0;
2816: MatCreate(PetscObjectComm((PetscObject)matin),&mat);
2817: MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
2818: MatSetBlockSizesFromMats(mat,matin,matin);
2819: MatSetType(mat,((PetscObject)matin)->type_name);
2820: a = (Mat_MPIAIJ*)mat->data;
2822: mat->factortype = matin->factortype;
2823: mat->assembled = PETSC_TRUE;
2824: mat->insertmode = NOT_SET_VALUES;
2825: mat->preallocated = PETSC_TRUE;
2827: a->size = oldmat->size;
2828: a->rank = oldmat->rank;
2829: a->donotstash = oldmat->donotstash;
2830: a->roworiented = oldmat->roworiented;
2831: a->rowindices = 0;
2832: a->rowvalues = 0;
2833: a->getrowactive = PETSC_FALSE;
2835: PetscLayoutReference(matin->rmap,&mat->rmap);
2836: PetscLayoutReference(matin->cmap,&mat->cmap);
2838: if (oldmat->colmap) {
2839: #if defined(PETSC_USE_CTABLE)
2840: PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2841: #else
2842: PetscMalloc1(mat->cmap->N,&a->colmap);
2843: PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N)*sizeof(PetscInt));
2844: PetscMemcpy(a->colmap,oldmat->colmap,(mat->cmap->N)*sizeof(PetscInt));
2845: #endif
2846: } else a->colmap = 0;
2847: if (oldmat->garray) {
2848: PetscInt len;
2849: len = oldmat->B->cmap->n;
2850: PetscMalloc1(len+1,&a->garray);
2851: PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));
2852: if (len) { PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt)); }
2853: } else a->garray = 0;
2855: VecDuplicate(oldmat->lvec,&a->lvec);
2856: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);
2857: VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2858: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);
2860: if (oldmat->Mvctx_mpi1) {
2861: VecScatterCopy(oldmat->Mvctx_mpi1,&a->Mvctx_mpi1);
2862: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx_mpi1);
2863: }
2865: MatDuplicate(oldmat->A,cpvalues,&a->A);
2866: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
2867: MatDuplicate(oldmat->B,cpvalues,&a->B);
2868: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);
2869: PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2870: *newmat = mat;
2871: return(0);
2872: }
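/*
   Usage sketch (not part of this file; A is an assumed assembled MATMPIAIJ matrix):
   MatDuplicate() copies the layout, the column map and the communication scatter; whether the
   numerical values are copied is controlled by the MatDuplicateOption:

     Mat C;
     MatDuplicate(A,MAT_COPY_VALUES,&C);
*/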
2874: PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
2875: {
2876: PetscBool isbinary, ishdf5;
2882: /* force binary viewer to load .info file if it has not yet done so */
2883: PetscViewerSetUp(viewer);
2884: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
2885: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERHDF5, &ishdf5);
2886: if (isbinary) {
2887: MatLoad_MPIAIJ_Binary(newMat,viewer);
2888: } else if (ishdf5) {
2889: #if defined(PETSC_HAVE_HDF5)
2890: MatLoad_AIJ_HDF5(newMat,viewer);
2891: #else
2892: SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"HDF5 not supported in this build.\nPlease reconfigure using --download-hdf5");
2893: #endif
2894: } else {
2895: SETERRQ2(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"Viewer type %s not yet supported for reading %s matrices",((PetscObject)viewer)->type_name,((PetscObject)newMat)->type_name);
2896: }
2897: return(0);
2898: }
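/*
   Usage sketch (not part of this file; the file name "matrix.dat" is assumed to have been
   written earlier with MatView() on a binary viewer): loading a MATMPIAIJ matrix dispatches to
   the binary or HDF5 reader above.

     PetscViewer viewer;
     Mat         A;
     PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetType(A,MATMPIAIJ);
     MatLoad(A,viewer);
     PetscViewerDestroy(&viewer);
*/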
2900: PetscErrorCode MatLoad_MPIAIJ_Binary(Mat newMat, PetscViewer viewer)
2901: {
2902: PetscScalar *vals,*svals;
2903: MPI_Comm comm;
2905: PetscMPIInt rank,size,tag = ((PetscObject)viewer)->tag;
2906: PetscInt i,nz,j,rstart,rend,mmax,maxnz = 0;
2907: PetscInt header[4],*rowlengths = 0,M,N,m,*cols;
2908: PetscInt *ourlens = NULL,*procsnz = NULL,*offlens = NULL,jj,*mycols,*smycols;
2909: PetscInt cend,cstart,n,*rowners;
2910: int fd;
2911: PetscInt bs = newMat->rmap->bs;
2914: PetscObjectGetComm((PetscObject)viewer,&comm);
2915: MPI_Comm_size(comm,&size);
2916: MPI_Comm_rank(comm,&rank);
2917: PetscViewerBinaryGetDescriptor(viewer,&fd);
2918: if (!rank) {
2919: PetscBinaryRead(fd,(char*)header,4,PETSC_INT);
2920: if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2921:     if (header[3] < 0) SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format on disk, cannot load as MATMPIAIJ");
2922: }
2924: PetscOptionsBegin(comm,NULL,"Options for loading MATMPIAIJ matrix","Mat");
2925: PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,NULL);
2926: PetscOptionsEnd();
2927: if (bs < 0) bs = 1;
2929: MPI_Bcast(header+1,3,MPIU_INT,0,comm);
2930: M = header[1]; N = header[2];
2932: /* If global sizes are set, check if they are consistent with that given in the file */
2933: if (newMat->rmap->N >= 0 && newMat->rmap->N != M) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of rows:Matrix in file has (%D) and input matrix has (%D)",newMat->rmap->N,M);
2934: if (newMat->cmap->N >=0 && newMat->cmap->N != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of cols:Matrix in file has (%D) and input matrix has (%D)",newMat->cmap->N,N);
2936: /* determine ownership of all (block) rows */
2937: if (M%bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of rows (%d) and block size (%d)",M,bs);
2938: if (newMat->rmap->n < 0) m = bs*((M/bs)/size + (((M/bs) % size) > rank)); /* PETSC_DECIDE */
2939: else m = newMat->rmap->n; /* Set by user */
2941: PetscMalloc1(size+1,&rowners);
2942: MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);
2944: /* First process needs enough room for process with most rows */
2945: if (!rank) {
2946: mmax = rowners[1];
2947: for (i=2; i<=size; i++) {
2948: mmax = PetscMax(mmax, rowners[i]);
2949: }
2950: } else mmax = -1; /* unused, but compilers complain */
2952: rowners[0] = 0;
2953: for (i=2; i<=size; i++) {
2954: rowners[i] += rowners[i-1];
2955: }
2956: rstart = rowners[rank];
2957: rend = rowners[rank+1];
2959: /* distribute row lengths to all processors */
2960: PetscMalloc2(m,&ourlens,m,&offlens);
2961: if (!rank) {
2962: PetscBinaryRead(fd,ourlens,m,PETSC_INT);
2963: PetscMalloc1(mmax,&rowlengths);
2964: PetscCalloc1(size,&procsnz);
2965: for (j=0; j<m; j++) {
2966: procsnz[0] += ourlens[j];
2967: }
2968: for (i=1; i<size; i++) {
2969: PetscBinaryRead(fd,rowlengths,rowners[i+1]-rowners[i],PETSC_INT);
2970: /* calculate the number of nonzeros on each processor */
2971: for (j=0; j<rowners[i+1]-rowners[i]; j++) {
2972: procsnz[i] += rowlengths[j];
2973: }
2974: MPIULong_Send(rowlengths,rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
2975: }
2976: PetscFree(rowlengths);
2977: } else {
2978: MPIULong_Recv(ourlens,m,MPIU_INT,0,tag,comm);
2979: }
2981: if (!rank) {
2982: /* determine max buffer needed and allocate it */
2983: maxnz = 0;
2984: for (i=0; i<size; i++) {
2985: maxnz = PetscMax(maxnz,procsnz[i]);
2986: }
2987: PetscMalloc1(maxnz,&cols);
2989: /* read in my part of the matrix column indices */
2990: nz = procsnz[0];
2991: PetscMalloc1(nz,&mycols);
2992: PetscBinaryRead(fd,mycols,nz,PETSC_INT);
2994:     /* read in everyone else's and ship off */
2995: for (i=1; i<size; i++) {
2996: nz = procsnz[i];
2997: PetscBinaryRead(fd,cols,nz,PETSC_INT);
2998: MPIULong_Send(cols,nz,MPIU_INT,i,tag,comm);
2999: }
3000: PetscFree(cols);
3001: } else {
3002: /* determine buffer space needed for message */
3003: nz = 0;
3004: for (i=0; i<m; i++) {
3005: nz += ourlens[i];
3006: }
3007: PetscMalloc1(nz,&mycols);
3009:     /* receive message of column indices */
3010: MPIULong_Recv(mycols,nz,MPIU_INT,0,tag,comm);
3011: }
3013: /* determine column ownership if matrix is not square */
3014: if (N != M) {
3015: if (newMat->cmap->n < 0) n = N/size + ((N % size) > rank);
3016: else n = newMat->cmap->n;
3017: MPI_Scan(&n,&cend,1,MPIU_INT,MPI_SUM,comm);
3018: cstart = cend - n;
3019: } else {
3020: cstart = rstart;
3021: cend = rend;
3022: n = cend - cstart;
3023: }
3025: /* loop over local rows, determining number of off diagonal entries */
3026: PetscMemzero(offlens,m*sizeof(PetscInt));
3027: jj = 0;
3028: for (i=0; i<m; i++) {
3029: for (j=0; j<ourlens[i]; j++) {
3030: if (mycols[jj] < cstart || mycols[jj] >= cend) offlens[i]++;
3031: jj++;
3032: }
3033: }
3035: for (i=0; i<m; i++) {
3036: ourlens[i] -= offlens[i];
3037: }
3038: MatSetSizes(newMat,m,n,M,N);
3040: if (bs > 1) {MatSetBlockSize(newMat,bs);}
3042: MatMPIAIJSetPreallocation(newMat,0,ourlens,0,offlens);
3044: for (i=0; i<m; i++) {
3045: ourlens[i] += offlens[i];
3046: }
3048: if (!rank) {
3049: PetscMalloc1(maxnz+1,&vals);
3051: /* read in my part of the matrix numerical values */
3052: nz = procsnz[0];
3053: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
3055: /* insert into matrix */
3056: jj = rstart;
3057: smycols = mycols;
3058: svals = vals;
3059: for (i=0; i<m; i++) {
3060: MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
3061: smycols += ourlens[i];
3062: svals += ourlens[i];
3063: jj++;
3064: }
3066: /* read in other processors and ship out */
3067: for (i=1; i<size; i++) {
3068: nz = procsnz[i];
3069: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
3070: MPIULong_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newMat)->tag,comm);
3071: }
3072: PetscFree(procsnz);
3073: } else {
3074: /* receive numeric values */
3075: PetscMalloc1(nz+1,&vals);
3077:     /* receive message of values */
3078: MPIULong_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newMat)->tag,comm);
3080: /* insert into matrix */
3081: jj = rstart;
3082: smycols = mycols;
3083: svals = vals;
3084: for (i=0; i<m; i++) {
3085: MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
3086: smycols += ourlens[i];
3087: svals += ourlens[i];
3088: jj++;
3089: }
3090: }
3091: PetscFree2(ourlens,offlens);
3092: PetscFree(vals);
3093: PetscFree(mycols);
3094: PetscFree(rowners);
3095: MatAssemblyBegin(newMat,MAT_FINAL_ASSEMBLY);
3096: MatAssemblyEnd(newMat,MAT_FINAL_ASSEMBLY);
3097: return(0);
3098: }
3100: /* Not scalable because of ISAllGather() unless getting all columns. */
3101: PetscErrorCode ISGetSeqIS_Private(Mat mat,IS iscol,IS *isseq)
3102: {
3104: IS iscol_local;
3105: PetscBool isstride;
3106: PetscMPIInt lisstride=0,gisstride;
3109: /* check if we are grabbing all columns*/
3110: PetscObjectTypeCompare((PetscObject)iscol,ISSTRIDE,&isstride);
3112: if (isstride) {
3113: PetscInt start,len,mstart,mlen;
3114: ISStrideGetInfo(iscol,&start,NULL);
3115: ISGetLocalSize(iscol,&len);
3116: MatGetOwnershipRangeColumn(mat,&mstart,&mlen);
3117: if (mstart == start && mlen-mstart == len) lisstride = 1;
3118: }
3120: MPIU_Allreduce(&lisstride,&gisstride,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
3121: if (gisstride) {
3122: PetscInt N;
3123: MatGetSize(mat,NULL,&N);
3124: ISCreateStride(PetscObjectComm((PetscObject)mat),N,0,1,&iscol_local);
3125: ISSetIdentity(iscol_local);
3126: PetscInfo(mat,"Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n");
3127: } else {
3128: PetscInt cbs;
3129: ISGetBlockSize(iscol,&cbs);
3130: ISAllGather(iscol,&iscol_local);
3131: ISSetBlockSize(iscol_local,cbs);
3132: }
3134: *isseq = iscol_local;
3135: return(0);
3136: }
3138: /*
3139: Used by MatCreateSubMatrix_MPIAIJ_SameRowColDist() to avoid ISAllGather() and global size of iscol_local
3140: (see MatCreateSubMatrix_MPIAIJ_nonscalable)
3142: Input Parameters:
3143: mat - matrix
3144:    isrow - parallel row index set; its local indices are a subset of local rows of mat,
3145:            i.e., mat->rstart <= isrow[i] < mat->rend
3146: iscol - parallel column index set; its local indices are a subset of local columns of mat,
3147: i.e., mat->cstart <= iscol[i] < mat->cend
3148: Output Parameter:
3149: isrow_d,iscol_d - sequential row and column index sets for retrieving mat->A
3150: iscol_o - sequential column index set for retrieving mat->B
3151: garray - column map; garray[i] indicates global location of iscol_o[i] in iscol
3152: */
3153: PetscErrorCode ISGetSeqIS_SameColDist_Private(Mat mat,IS isrow,IS iscol,IS *isrow_d,IS *iscol_d,IS *iscol_o,const PetscInt *garray[])
3154: {
3156: Vec x,cmap;
3157: const PetscInt *is_idx;
3158: PetscScalar *xarray,*cmaparray;
3159: PetscInt ncols,isstart,*idx,m,rstart,*cmap1,count;
3160: Mat_MPIAIJ *a=(Mat_MPIAIJ*)mat->data;
3161: Mat B=a->B;
3162: Vec lvec=a->lvec,lcmap;
3163: PetscInt i,cstart,cend,Bn=B->cmap->N;
3164: MPI_Comm comm;
3165: VecScatter Mvctx=a->Mvctx;
3168: PetscObjectGetComm((PetscObject)mat,&comm);
3169: ISGetLocalSize(iscol,&ncols);
3171: /* (1) iscol is a sub-column vector of mat, pad it with '-1.' to form a full vector x */
3172: MatCreateVecs(mat,&x,NULL);
3173: VecSet(x,-1.0);
3174: VecDuplicate(x,&cmap);
3175: VecSet(cmap,-1.0);
3177: /* Get start indices */
3178: MPI_Scan(&ncols,&isstart,1,MPIU_INT,MPI_SUM,comm);
3179: isstart -= ncols;
3180: MatGetOwnershipRangeColumn(mat,&cstart,&cend);
3182: ISGetIndices(iscol,&is_idx);
3183: VecGetArray(x,&xarray);
3184: VecGetArray(cmap,&cmaparray);
3185: PetscMalloc1(ncols,&idx);
3186: for (i=0; i<ncols; i++) {
3187: xarray[is_idx[i]-cstart] = (PetscScalar)is_idx[i];
3188: cmaparray[is_idx[i]-cstart] = i + isstart; /* global index of iscol[i] */
3189: idx[i] = is_idx[i]-cstart; /* local index of iscol[i] */
3190: }
3191: VecRestoreArray(x,&xarray);
3192: VecRestoreArray(cmap,&cmaparray);
3193: ISRestoreIndices(iscol,&is_idx);
3195: /* Get iscol_d */
3196: ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,iscol_d);
3197: ISGetBlockSize(iscol,&i);
3198: ISSetBlockSize(*iscol_d,i);
3200: /* Get isrow_d */
3201: ISGetLocalSize(isrow,&m);
3202: rstart = mat->rmap->rstart;
3203: PetscMalloc1(m,&idx);
3204: ISGetIndices(isrow,&is_idx);
3205: for (i=0; i<m; i++) idx[i] = is_idx[i]-rstart;
3206: ISRestoreIndices(isrow,&is_idx);
3208: ISCreateGeneral(PETSC_COMM_SELF,m,idx,PETSC_OWN_POINTER,isrow_d);
3209: ISGetBlockSize(isrow,&i);
3210: ISSetBlockSize(*isrow_d,i);
3212: /* (2) Scatter x and cmap using aij->Mvctx to get their off-process portions (see MatMult_MPIAIJ) */
3213: VecScatterBegin(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);
3214: VecScatterEnd(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);
3216: VecDuplicate(lvec,&lcmap);
3218: VecScatterBegin(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);
3219: VecScatterEnd(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);
3221: /* (3) create sequential iscol_o (a subset of iscol) and isgarray */
3222: /* off-process column indices */
3223: count = 0;
3224: PetscMalloc1(Bn,&idx);
3225: PetscMalloc1(Bn,&cmap1);
3227: VecGetArray(lvec,&xarray);
3228: VecGetArray(lcmap,&cmaparray);
3229: for (i=0; i<Bn; i++) {
3230: if (PetscRealPart(xarray[i]) > -1.0) {
3231: idx[count] = i; /* local column index in off-diagonal part B */
3232: cmap1[count] = (PetscInt)PetscRealPart(cmaparray[i]); /* column index in submat */
3233: count++;
3234: }
3235: }
3236: VecRestoreArray(lvec,&xarray);
3237: VecRestoreArray(lcmap,&cmaparray);
3239: ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_COPY_VALUES,iscol_o);
3240: /* cannot ensure iscol_o has same blocksize as iscol! */
3242: PetscFree(idx);
3243: *garray = cmap1;
3245: VecDestroy(&x);
3246: VecDestroy(&cmap);
3247: VecDestroy(&lcmap);
3248: return(0);
3249: }
3251: /* isrow and iscol have same processor distribution as mat, output *submat is a submatrix of local mat */
3252: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *submat)
3253: {
3255: Mat_MPIAIJ *a = (Mat_MPIAIJ*)mat->data,*asub;
3256: Mat M = NULL;
3257: MPI_Comm comm;
3258: IS iscol_d,isrow_d,iscol_o;
3259: Mat Asub = NULL,Bsub = NULL;
3260: PetscInt n;
3263: PetscObjectGetComm((PetscObject)mat,&comm);
3265: if (call == MAT_REUSE_MATRIX) {
3266: /* Retrieve isrow_d, iscol_d and iscol_o from submat */
3267: PetscObjectQuery((PetscObject)*submat,"isrow_d",(PetscObject*)&isrow_d);
3268: if (!isrow_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"isrow_d passed in was not used before, cannot reuse");
3270: PetscObjectQuery((PetscObject)*submat,"iscol_d",(PetscObject*)&iscol_d);
3271: if (!iscol_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_d passed in was not used before, cannot reuse");
3273: PetscObjectQuery((PetscObject)*submat,"iscol_o",(PetscObject*)&iscol_o);
3274: if (!iscol_o) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_o passed in was not used before, cannot reuse");
3276: /* Update diagonal and off-diagonal portions of submat */
3277: asub = (Mat_MPIAIJ*)(*submat)->data;
3278: MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->A);
3279: ISGetLocalSize(iscol_o,&n);
3280: if (n) {
3281: MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->B);
3282: }
3283: MatAssemblyBegin(*submat,MAT_FINAL_ASSEMBLY);
3284: MatAssemblyEnd(*submat,MAT_FINAL_ASSEMBLY);
3286: } else { /* call == MAT_INITIAL_MATRIX) */
3287: const PetscInt *garray;
3288: PetscInt BsubN;
3290: /* Create isrow_d, iscol_d, iscol_o and isgarray (replace isgarray with array?) */
3291: ISGetSeqIS_SameColDist_Private(mat,isrow,iscol,&isrow_d,&iscol_d,&iscol_o,&garray);
3293: /* Create local submatrices Asub and Bsub */
3294: MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Asub);
3295: MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Bsub);
3297: /* Create submatrix M */
3298: MatCreateMPIAIJWithSeqAIJ(comm,Asub,Bsub,garray,&M);
3300: /* If Bsub has empty columns, compress iscol_o such that it will retrieve condensed Bsub from a->B during reuse */
3301: asub = (Mat_MPIAIJ*)M->data;
3303: ISGetLocalSize(iscol_o,&BsubN);
3304: n = asub->B->cmap->N;
3305: if (BsubN > n) {
3306: /* This case can be tested using ~petsc/src/tao/bound/examples/tutorials/runplate2_3 */
3307: const PetscInt *idx;
3308: PetscInt i,j,*idx_new,*subgarray = asub->garray;
3309: PetscInfo2(M,"submatrix Bn %D != BsubN %D, update iscol_o\n",n,BsubN);
3311: PetscMalloc1(n,&idx_new);
3312: j = 0;
3313: ISGetIndices(iscol_o,&idx);
3314: for (i=0; i<n; i++) {
3315: if (j >= BsubN) break;
3316: while (subgarray[i] > garray[j]) j++;
3318: if (subgarray[i] == garray[j]) {
3319: idx_new[i] = idx[j++];
3320: } else SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"subgarray[%D]=%D cannot < garray[%D]=%D",i,subgarray[i],j,garray[j]);
3321: }
3322: ISRestoreIndices(iscol_o,&idx);
3324: ISDestroy(&iscol_o);
3325: ISCreateGeneral(PETSC_COMM_SELF,n,idx_new,PETSC_OWN_POINTER,&iscol_o);
3327: } else if (BsubN < n) {
3328: SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Columns of Bsub cannot be smaller than B's",BsubN,asub->B->cmap->N);
3329: }
3331: PetscFree(garray);
3332: *submat = M;
3334: /* Save isrow_d, iscol_d and iscol_o used in processor for next request */
3335: PetscObjectCompose((PetscObject)M,"isrow_d",(PetscObject)isrow_d);
3336: ISDestroy(&isrow_d);
3338: PetscObjectCompose((PetscObject)M,"iscol_d",(PetscObject)iscol_d);
3339: ISDestroy(&iscol_d);
3341: PetscObjectCompose((PetscObject)M,"iscol_o",(PetscObject)iscol_o);
3342: ISDestroy(&iscol_o);
3343: }
3344: return(0);
3345: }
3347: PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
3348: {
3350: IS iscol_local=NULL,isrow_d;
3351: PetscInt csize;
3352: PetscInt n,i,j,start,end;
3353: PetscBool sameRowDist=PETSC_FALSE,sameDist[2],tsameDist[2];
3354: MPI_Comm comm;
3357: /* If isrow has same processor distribution as mat,
3358: call MatCreateSubMatrix_MPIAIJ_SameRowDist() to avoid using a hash table with global size of iscol */
3359: if (call == MAT_REUSE_MATRIX) {
3360: PetscObjectQuery((PetscObject)*newmat,"isrow_d",(PetscObject*)&isrow_d);
3361: if (isrow_d) {
3362: sameRowDist = PETSC_TRUE;
3363: tsameDist[1] = PETSC_TRUE; /* sameColDist */
3364: } else {
3365: PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_local);
3366: if (iscol_local) {
3367: sameRowDist = PETSC_TRUE;
3368: tsameDist[1] = PETSC_FALSE; /* !sameColDist */
3369: }
3370: }
3371: } else {
3372: /* Check if isrow has same processor distribution as mat */
3373: sameDist[0] = PETSC_FALSE;
3374: ISGetLocalSize(isrow,&n);
3375: if (!n) {
3376: sameDist[0] = PETSC_TRUE;
3377: } else {
3378: ISGetMinMax(isrow,&i,&j);
3379: MatGetOwnershipRange(mat,&start,&end);
3380: if (i >= start && j < end) {
3381: sameDist[0] = PETSC_TRUE;
3382: }
3383: }
3385: /* Check if iscol has same processor distribution as mat */
3386: sameDist[1] = PETSC_FALSE;
3387: ISGetLocalSize(iscol,&n);
3388: if (!n) {
3389: sameDist[1] = PETSC_TRUE;
3390: } else {
3391: ISGetMinMax(iscol,&i,&j);
3392: MatGetOwnershipRangeColumn(mat,&start,&end);
3393: if (i >= start && j < end) sameDist[1] = PETSC_TRUE;
3394: }
3396: PetscObjectGetComm((PetscObject)mat,&comm);
3397: MPIU_Allreduce(&sameDist,&tsameDist,2,MPIU_BOOL,MPI_LAND,comm);
3398: sameRowDist = tsameDist[0];
3399: }
3401: if (sameRowDist) {
3402: if (tsameDist[1]) { /* sameRowDist & sameColDist */
3403: /* isrow and iscol have same processor distribution as mat */
3404: MatCreateSubMatrix_MPIAIJ_SameRowColDist(mat,isrow,iscol,call,newmat);
3405: return(0);
3406: } else { /* sameRowDist */
3407: /* isrow has same processor distribution as mat */
3408: if (call == MAT_INITIAL_MATRIX) {
3409: PetscBool sorted;
3410: ISGetSeqIS_Private(mat,iscol,&iscol_local);
3411: ISGetLocalSize(iscol_local,&n); /* local size of iscol_local = global columns of newmat */
3412: ISGetSize(iscol,&i);
3413: if (n != i) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"n %D != size of iscol %D",n,i);
3415: ISSorted(iscol_local,&sorted);
3416: if (sorted) {
3417: /* MatCreateSubMatrix_MPIAIJ_SameRowDist() requires iscol_local be sorted; it can have duplicate indices */
3418: MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,iscol_local,MAT_INITIAL_MATRIX,newmat);
3419: return(0);
3420: }
3421: } else { /* call == MAT_REUSE_MATRIX */
3422: IS iscol_sub;
3423: PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3424: if (iscol_sub) {
3425: MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,NULL,call,newmat);
3426: return(0);
3427: }
3428: }
3429: }
3430: }
3432: /* General case: iscol -> iscol_local which has global size of iscol */
3433: if (call == MAT_REUSE_MATRIX) {
3434: PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);
3435: if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3436: } else {
3437: if (!iscol_local) {
3438: ISGetSeqIS_Private(mat,iscol,&iscol_local);
3439: }
3440: }
3442: ISGetLocalSize(iscol,&csize);
3443: MatCreateSubMatrix_MPIAIJ_nonscalable(mat,isrow,iscol_local,csize,call,newmat);
3445: if (call == MAT_INITIAL_MATRIX) {
3446: PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);
3447: ISDestroy(&iscol_local);
3448: }
3449: return(0);
3450: }
3452: /*@C
3453: MatCreateMPIAIJWithSeqAIJ - creates an MPIAIJ matrix using SeqAIJ matrices that contain the "diagonal"
3454: and "off-diagonal" part of the matrix in CSR format.
3456: Collective on MPI_Comm
3458: Input Parameters:
3459: + comm - MPI communicator
3460: . A - "diagonal" portion of matrix
3461: . B - "off-diagonal" portion of matrix, may have empty columns, will be destroyed by this routine
3462: - garray - global index of B columns
3464: Output Parameter:
3465: . mat - the matrix, with input A as its local diagonal matrix
3466: Level: advanced
3468: Notes:
3469: See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix.
3470: A becomes part of the output mat, and B is destroyed by this routine; the user cannot use A or B afterwards.
3472: .seealso: MatCreateMPIAIJWithSplitArrays()
3473: @*/
3474: PetscErrorCode MatCreateMPIAIJWithSeqAIJ(MPI_Comm comm,Mat A,Mat B,const PetscInt garray[],Mat *mat)
3475: {
3477: Mat_MPIAIJ *maij;
3478: Mat_SeqAIJ *b=(Mat_SeqAIJ*)B->data,*bnew;
3479: PetscInt *oi=b->i,*oj=b->j,i,nz,col;
3480: PetscScalar *oa=b->a;
3481: Mat Bnew;
3482: PetscInt m,n,N;
3485: MatCreate(comm,mat);
3486: MatGetSize(A,&m,&n);
3487: if (m != B->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Am %D != Bm %D",m,B->rmap->N);
3488: if (A->rmap->bs != B->rmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A row bs %D != B row bs %D",A->rmap->bs,B->rmap->bs);
3489: /* remove check below; When B is created using iscol_o from ISGetSeqIS_SameColDist_Private(), its bs may not be same as A */
3490: /* if (A->cmap->bs != B->cmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A column bs %D != B column bs %D",A->cmap->bs,B->cmap->bs); */
3492: /* Get global columns of mat */
3493: MPIU_Allreduce(&n,&N,1,MPIU_INT,MPI_SUM,comm);
3495: MatSetSizes(*mat,m,n,PETSC_DECIDE,N);
3496: MatSetType(*mat,MATMPIAIJ);
3497: MatSetBlockSizes(*mat,A->rmap->bs,A->cmap->bs);
3498: maij = (Mat_MPIAIJ*)(*mat)->data;
3500: (*mat)->preallocated = PETSC_TRUE;
3502: PetscLayoutSetUp((*mat)->rmap);
3503: PetscLayoutSetUp((*mat)->cmap);
3505: /* Set A as diagonal portion of *mat */
3506: maij->A = A;
3508: nz = oi[m];
3509: for (i=0; i<nz; i++) {
3510: col = oj[i];
3511: oj[i] = garray[col];
3512: }
3514: /* Set Bnew as off-diagonal portion of *mat */
3515: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,N,oi,oj,oa,&Bnew);
3516: bnew = (Mat_SeqAIJ*)Bnew->data;
3517: bnew->maxnz = b->maxnz; /* allocated nonzeros of B */
3518: maij->B = Bnew;
3520: if (B->rmap->N != Bnew->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"BN %D != BnewN %D",B->rmap->N,Bnew->rmap->N);
3522: b->singlemalloc = PETSC_FALSE; /* B arrays are shared by Bnew */
3523: b->free_a = PETSC_FALSE;
3524: b->free_ij = PETSC_FALSE;
3525: MatDestroy(&B);
3527: bnew->singlemalloc = PETSC_TRUE; /* arrays will be freed by MatDestroy(&Bnew) */
3528: bnew->free_a = PETSC_TRUE;
3529: bnew->free_ij = PETSC_TRUE;
3531: /* condense columns of maij->B */
3532: MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
3533: MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
3534: MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
3535: MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
3536: MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3537: return(0);
3538: }
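
/*
   A minimal usage sketch of MatCreateMPIAIJWithSeqAIJ() above. It assumes the caller already
   owns a sequential "diagonal" block Ad, a sequential "off-diagonal" block Bo and its global
   column map garray; the helper name is an illustrative assumption, not part of the PETSc API.
   Both Ad and Bo are consumed by the call, as documented above.
*/
static PetscErrorCode Sketch_BuildMPIAIJFromSeqParts(MPI_Comm comm,Mat Ad,Mat Bo,const PetscInt garray[],Mat *C)
{
  MatCreateMPIAIJWithSeqAIJ(comm,Ad,Bo,garray,C); /* Ad becomes the diagonal block of *C; Bo is destroyed */
  /* from here on only *C may be used; Ad and Bo must not be referenced again */
  return(0);
}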
3540: extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_SingleIS_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool,Mat*);
3542: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat mat,IS isrow,IS iscol,IS iscol_local,MatReuse call,Mat *newmat)
3543: {
3545: PetscInt i,m,n,rstart,row,rend,nz,j,bs,cbs;
3546: PetscInt *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3547: Mat_MPIAIJ *a=(Mat_MPIAIJ*)mat->data;
3548: Mat M,Msub,B=a->B;
3549: MatScalar *aa;
3550: Mat_SeqAIJ *aij;
3551: PetscInt *garray = a->garray,*colsub,Ncols;
3552: PetscInt count,Bn=B->cmap->N,cstart=mat->cmap->rstart,cend=mat->cmap->rend;
3553: IS iscol_sub,iscmap;
3554: const PetscInt *is_idx,*cmap;
3555: PetscBool allcolumns=PETSC_FALSE;
3556: MPI_Comm comm;
3559: PetscObjectGetComm((PetscObject)mat,&comm);
3561: if (call == MAT_REUSE_MATRIX) {
3562: PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3563: if (!iscol_sub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"SubIScol passed in was not used before, cannot reuse");
3564: ISGetLocalSize(iscol_sub,&count);
3566: PetscObjectQuery((PetscObject)*newmat,"Subcmap",(PetscObject*)&iscmap);
3567: if (!iscmap) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Subcmap passed in was not used before, cannot reuse");
3569: PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Msub);
3570: if (!Msub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3572: MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_REUSE_MATRIX,PETSC_FALSE,&Msub);
3574: } else { /* call == MAT_INITIAL_MATRIX */
3575: PetscBool flg;
3577: ISGetLocalSize(iscol,&n);
3578: ISGetSize(iscol,&Ncols);
3580: /* (1) iscol -> nonscalable iscol_local */
3581: /* Check for special case: each processor gets entire matrix columns */
3582: ISIdentity(iscol_local,&flg);
3583: if (flg && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3584: if (allcolumns) {
3585: iscol_sub = iscol_local;
3586: PetscObjectReference((PetscObject)iscol_local);
3587: ISCreateStride(PETSC_COMM_SELF,n,0,1,&iscmap);
3589: } else {
3590: /* (2) iscol_local -> iscol_sub and iscmap. Implementation below requires iscol_local be sorted; it can have duplicate indices */
3591: PetscInt *idx,*cmap1,k;
3592: PetscMalloc1(Ncols,&idx);
3593: PetscMalloc1(Ncols,&cmap1);
3594: ISGetIndices(iscol_local,&is_idx);
3595: count = 0;
3596: k = 0;
3597: for (i=0; i<Ncols; i++) {
3598: j = is_idx[i];
3599: if (j >= cstart && j < cend) {
3600: /* diagonal part of mat */
3601: idx[count] = j;
3602: cmap1[count++] = i; /* column index in submat */
3603: } else if (Bn) {
3604: /* off-diagonal part of mat */
3605: if (j == garray[k]) {
3606: idx[count] = j;
3607: cmap1[count++] = i; /* column index in submat */
3608: } else if (j > garray[k]) {
3609: while (j > garray[k] && k < Bn-1) k++;
3610: if (j == garray[k]) {
3611: idx[count] = j;
3612: cmap1[count++] = i; /* column index in submat */
3613: }
3614: }
3615: }
3616: }
3617: ISRestoreIndices(iscol_local,&is_idx);
3619: ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_OWN_POINTER,&iscol_sub);
3620: ISGetBlockSize(iscol,&cbs);
3621: ISSetBlockSize(iscol_sub,cbs);
3623: ISCreateGeneral(PetscObjectComm((PetscObject)iscol_local),count,cmap1,PETSC_OWN_POINTER,&iscmap);
3624: }
3626: /* (3) Create sequential Msub */
3627: MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_INITIAL_MATRIX,allcolumns,&Msub);
3628: }
3630: ISGetLocalSize(iscol_sub,&count);
3631: aij = (Mat_SeqAIJ*)(Msub)->data;
3632: ii = aij->i;
3633: ISGetIndices(iscmap,&cmap);
3635: /*
3636: m - number of local rows
3637: Ncols - number of columns (same on all processors)
3638: rstart - first row in new global matrix generated
3639: */
3640: MatGetSize(Msub,&m,NULL);
3642: if (call == MAT_INITIAL_MATRIX) {
3643: /* (4) Create parallel newmat */
3644: PetscMPIInt rank,size;
3645: PetscInt csize;
3647: MPI_Comm_size(comm,&size);
3648: MPI_Comm_rank(comm,&rank);
3650: /*
3651: Determine the number of non-zeros in the diagonal and off-diagonal
3652: portions of the matrix in order to do correct preallocation
3653: */
3655: /* first get start and end of "diagonal" columns */
3656: ISGetLocalSize(iscol,&csize);
3657: if (csize == PETSC_DECIDE) {
3658: ISGetSize(isrow,&mglobal);
3659: if (mglobal == Ncols) { /* square matrix */
3660: nlocal = m;
3661: } else {
3662: nlocal = Ncols/size + ((Ncols % size) > rank);
3663: }
3664: } else {
3665: nlocal = csize;
3666: }
3667: MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3668: rstart = rend - nlocal;
3669: if (rank == size - 1 && rend != Ncols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,Ncols);
3671: /* next, compute all the lengths */
3672: jj = aij->j;
3673: PetscMalloc1(2*m+1,&dlens);
3674: olens = dlens + m;
3675: for (i=0; i<m; i++) {
3676: jend = ii[i+1] - ii[i];
3677: olen = 0;
3678: dlen = 0;
3679: for (j=0; j<jend; j++) {
3680: if (cmap[*jj] < rstart || cmap[*jj] >= rend) olen++;
3681: else dlen++;
3682: jj++;
3683: }
3684: olens[i] = olen;
3685: dlens[i] = dlen;
3686: }
3688: ISGetBlockSize(isrow,&bs);
3689: ISGetBlockSize(iscol,&cbs);
3691: MatCreate(comm,&M);
3692: MatSetSizes(M,m,nlocal,PETSC_DECIDE,Ncols);
3693: MatSetBlockSizes(M,bs,cbs);
3694: MatSetType(M,((PetscObject)mat)->type_name);
3695: MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3696: PetscFree(dlens);
3698: } else { /* call == MAT_REUSE_MATRIX */
3699: M = *newmat;
3700: MatGetLocalSize(M,&i,NULL);
3701: if (i != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3702: MatZeroEntries(M);
3703: /*
3704: The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3705: rather than the slower MatSetValues().
3706: */
3707: M->was_assembled = PETSC_TRUE;
3708: M->assembled = PETSC_FALSE;
3709: }
3711: /* (5) Set values of Msub to *newmat */
3712: PetscMalloc1(count,&colsub);
3713: MatGetOwnershipRange(M,&rstart,NULL);
3715: jj = aij->j;
3716: aa = aij->a;
3717: for (i=0; i<m; i++) {
3718: row = rstart + i;
3719: nz = ii[i+1] - ii[i];
3720: for (j=0; j<nz; j++) colsub[j] = cmap[jj[j]];
3721: MatSetValues_MPIAIJ(M,1,&row,nz,colsub,aa,INSERT_VALUES);
3722: jj += nz; aa += nz;
3723: }
3724: ISRestoreIndices(iscmap,&cmap);
3726: MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3727: MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3729: PetscFree(colsub);
3731: /* save Msub, iscol_sub and iscmap used in processor for next request */
3732: if (call == MAT_INITIAL_MATRIX) {
3733: *newmat = M;
3734: PetscObjectCompose((PetscObject)(*newmat),"SubMatrix",(PetscObject)Msub);
3735: MatDestroy(&Msub);
3737: PetscObjectCompose((PetscObject)(*newmat),"SubIScol",(PetscObject)iscol_sub);
3738: ISDestroy(&iscol_sub);
3740: PetscObjectCompose((PetscObject)(*newmat),"Subcmap",(PetscObject)iscmap);
3741: ISDestroy(&iscmap);
3743: if (iscol_local) {
3744: PetscObjectCompose((PetscObject)(*newmat),"ISAllGather",(PetscObject)iscol_local);
3745: ISDestroy(&iscol_local);
3746: }
3747: }
3748: return(0);
3749: }
3751: /*
3752: Not great since it makes two copies of the submatrix: first a SeqAIJ on each process,
3753: and then the end result obtained by concatenating the local matrices.
3754: Writing it directly would be much like MatCreateSubMatrices_MPIAIJ().
3756: Note: This requires a sequential iscol with all indices.
3757: */
3758: PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3759: {
3761: PetscMPIInt rank,size;
3762: PetscInt i,m,n,rstart,row,rend,nz,*cwork,j,bs,cbs;
3763: PetscInt *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3764: Mat M,Mreuse;
3765: MatScalar *aa,*vwork;
3766: MPI_Comm comm;
3767: Mat_SeqAIJ *aij;
3768: PetscBool colflag,allcolumns=PETSC_FALSE;
3771: PetscObjectGetComm((PetscObject)mat,&comm);
3772: MPI_Comm_rank(comm,&rank);
3773: MPI_Comm_size(comm,&size);
3775: /* Check for special case: each processor gets entire matrix columns */
3776: ISIdentity(iscol,&colflag);
3777: ISGetLocalSize(iscol,&n);
3778: if (colflag && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3780: if (call == MAT_REUSE_MATRIX) {
3781: PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);
3782: if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3783: MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,allcolumns,&Mreuse);
3784: } else {
3785: MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,allcolumns,&Mreuse);
3786: }
3788: /*
3789: m - number of local rows
3790: n - number of columns (same on all processors)
3791: rstart - first row in new global matrix generated
3792: */
3793: MatGetSize(Mreuse,&m,&n);
3794: MatGetBlockSizes(Mreuse,&bs,&cbs);
3795: if (call == MAT_INITIAL_MATRIX) {
3796: aij = (Mat_SeqAIJ*)(Mreuse)->data;
3797: ii = aij->i;
3798: jj = aij->j;
3800: /*
3801: Determine the number of non-zeros in the diagonal and off-diagonal
3802: portions of the matrix in order to do correct preallocation
3803: */
3805: /* first get start and end of "diagonal" columns */
3806: if (csize == PETSC_DECIDE) {
3807: ISGetSize(isrow,&mglobal);
3808: if (mglobal == n) { /* square matrix */
3809: nlocal = m;
3810: } else {
3811: nlocal = n/size + ((n % size) > rank);
3812: }
3813: } else {
3814: nlocal = csize;
3815: }
3816: MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3817: rstart = rend - nlocal;
3818: if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);
3820: /* next, compute all the lengths */
3821: PetscMalloc1(2*m+1,&dlens);
3822: olens = dlens + m;
3823: for (i=0; i<m; i++) {
3824: jend = ii[i+1] - ii[i];
3825: olen = 0;
3826: dlen = 0;
3827: for (j=0; j<jend; j++) {
3828: if (*jj < rstart || *jj >= rend) olen++;
3829: else dlen++;
3830: jj++;
3831: }
3832: olens[i] = olen;
3833: dlens[i] = dlen;
3834: }
3835: MatCreate(comm,&M);
3836: MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);
3837: MatSetBlockSizes(M,bs,cbs);
3838: MatSetType(M,((PetscObject)mat)->type_name);
3839: MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3840: PetscFree(dlens);
3841: } else {
3842: PetscInt ml,nl;
3844: M = *newmat;
3845: MatGetLocalSize(M,&ml,&nl);
3846: if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3847: MatZeroEntries(M);
3848: /*
3849: The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3850: rather than the slower MatSetValues().
3851: */
3852: M->was_assembled = PETSC_TRUE;
3853: M->assembled = PETSC_FALSE;
3854: }
3855: MatGetOwnershipRange(M,&rstart,&rend);
3856: aij = (Mat_SeqAIJ*)(Mreuse)->data;
3857: ii = aij->i;
3858: jj = aij->j;
3859: aa = aij->a;
3860: for (i=0; i<m; i++) {
3861: row = rstart + i;
3862: nz = ii[i+1] - ii[i];
3863: cwork = jj; jj += nz;
3864: vwork = aa; aa += nz;
3865: MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);
3866: }
3868: MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3869: MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3870: *newmat = M;
3872: /* save submatrix used in processor for next request */
3873: if (call == MAT_INITIAL_MATRIX) {
3874: PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);
3875: MatDestroy(&Mreuse);
3876: }
3877: return(0);
3878: }
3880: PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3881: {
3882: PetscInt m,cstart, cend,j,nnz,i,d;
3883: PetscInt *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3884: const PetscInt *JJ;
3885: PetscScalar *values;
3887: PetscBool nooffprocentries;
3890: if (Ii && Ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0, it is %D",Ii[0]);
3892: PetscLayoutSetUp(B->rmap);
3893: PetscLayoutSetUp(B->cmap);
3894: m = B->rmap->n;
3895: cstart = B->cmap->rstart;
3896: cend = B->cmap->rend;
3897: rstart = B->rmap->rstart;
3899: PetscCalloc2(m,&d_nnz,m,&o_nnz);
3901: #if defined(PETSC_USE_DEBUG)
3902: for (i=0; i<m && Ii; i++) {
3903: nnz = Ii[i+1]- Ii[i];
3904: JJ = J + Ii[i];
3905: if (nnz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative number of columns %D",i,nnz);
3906: if (nnz && (JJ[0] < 0)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index %D",i,JJ[0]);
3907: if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3908: }
3909: #endif
3911: for (i=0; i<m && Ii; i++) {
3912: nnz = Ii[i+1]- Ii[i];
3913: JJ = J + Ii[i];
3914: nnz_max = PetscMax(nnz_max,nnz);
3915: d = 0;
3916: for (j=0; j<nnz; j++) {
3917: if (cstart <= JJ[j] && JJ[j] < cend) d++;
3918: }
3919: d_nnz[i] = d;
3920: o_nnz[i] = nnz - d;
3921: }
3922: MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
3923: PetscFree2(d_nnz,o_nnz);
3925: if (v) values = (PetscScalar*)v;
3926: else {
3927: PetscCalloc1(nnz_max+1,&values);
3928: }
3930: for (i=0; i<m && Ii; i++) {
3931: ii = i + rstart;
3932: nnz = Ii[i+1]- Ii[i];
3933: MatSetValues_MPIAIJ(B,1,&ii,nnz,J+Ii[i],values+(v ? Ii[i] : 0),INSERT_VALUES);
3934: }
3935: nooffprocentries = B->nooffprocentries;
3936: B->nooffprocentries = PETSC_TRUE;
3937: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3938: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3939: B->nooffprocentries = nooffprocentries;
3941: if (!v) {
3942: PetscFree(values);
3943: }
3944: MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3945: return(0);
3946: }
3948: /*@
3949: MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
3950: (the default parallel PETSc format).
3952: Collective on MPI_Comm
3954: Input Parameters:
3955: + B - the matrix
3956: . i - the indices into j for the start of each local row (starts with zero)
3957: . j - the column indices for each local row (starts with zero)
3958: - v - optional values in the matrix
3960: Level: developer
3962: Notes:
3963: The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
3964: thus you CANNOT change the matrix entries by changing the values of v[] after you have
3965: called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3967: The i and j indices are 0 based, and i indices are indices corresponding to the local j array.
3969: The format which is used for the sparse matrix input is equivalent to a
3970: row-major ordering, i.e., for the following matrix, the input data expected is
3971: as shown:
3973: $ 1 0 0
3974: $ 2 0 3 P0
3975: $ -------
3976: $ 4 5 6 P1
3977: $
3978: $ Process0 [P0]: rows_owned=[0,1]
3979: $ i = {0,1,3} [size = nrow+1 = 2+1]
3980: $ j = {0,0,2} [size = 3]
3981: $ v = {1,2,3} [size = 3]
3982: $
3983: $ Process1 [P1]: rows_owned=[2]
3984: $ i = {0,3} [size = nrow+1 = 1+1]
3985: $ j = {0,1,2} [size = 3]
3986: $ v = {4,5,6} [size = 3]
3988: .keywords: matrix, aij, compressed row, sparse, parallel
3990: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateAIJ(), MATMPIAIJ,
3991: MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
3992: @*/
3993: PetscErrorCode MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3994: {
3998: PetscTryMethod(B,"MatMPIAIJSetPreallocationCSR_C",(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,i,j,v));
3999: return(0);
4000: }
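
/*
   A minimal usage sketch of MatMPIAIJSetPreallocationCSR(), written as process 0 of the 3x3
   example in the manpage above (rows 0-1 local). The helper name and the hard-coded sizes are
   illustrative assumptions only. Note that, per MatMPIAIJSetPreallocationCSR_MPIAIJ() above,
   the call also inserts the values and assembles the matrix.
*/
static PetscErrorCode Sketch_PreallocCSR_Proc0(MPI_Comm comm,Mat *B)
{
  PetscInt    i[] = {0,1,3}; /* row pointers for the 2 local rows */
  PetscInt    j[] = {0,0,2}; /* global column indices             */
  PetscScalar v[] = {1,2,3}; /* matrix values                     */

  MatCreate(comm,B);
  MatSetSizes(*B,2,PETSC_DECIDE,3,3);
  MatSetType(*B,MATMPIAIJ);
  MatMPIAIJSetPreallocationCSR(*B,i,j,v); /* i,j,v are copied into the internal format */
  return(0);
}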
4002: /*@C
4003: MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
4004: (the default parallel PETSc format). For good matrix assembly performance
4005: the user should preallocate the matrix storage by setting the parameters
4006: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
4007: performance can be increased by more than a factor of 50.
4009: Collective on MPI_Comm
4011: Input Parameters:
4012: + B - the matrix
4013: . d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
4014: (same value is used for all local rows)
4015: . d_nnz - array containing the number of nonzeros in the various rows of the
4016: DIAGONAL portion of the local submatrix (possibly different for each row)
4017: or NULL (PETSC_NULL_INTEGER in Fortran), if d_nz is used to specify the nonzero structure.
4018: The size of this array is equal to the number of local rows, i.e. 'm'.
4019: For matrices that will be factored, you must leave room for (and set)
4020: the diagonal entry even if it is zero.
4021: . o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
4022: submatrix (same value is used for all local rows).
4023: - o_nnz - array containing the number of nonzeros in the various rows of the
4024: OFF-DIAGONAL portion of the local submatrix (possibly different for
4025: each row) or NULL (PETSC_NULL_INTEGER in Fortran), if o_nz is used to specify the nonzero
4026: structure. The size of this array is equal to the number
4027: of local rows, i.e. 'm'.
4029: If the *_nnz parameter is given then the *_nz parameter is ignored
4031: The AIJ format (also called the Yale sparse matrix format or
4032: compressed row storage (CSR)), is fully compatible with standard Fortran 77
4033: storage. The stored row and column indices begin with zero.
4034: See Users-Manual: ch_mat for details.
4036: The parallel matrix is partitioned such that the first m0 rows belong to
4037: process 0, the next m1 rows belong to process 1, the next m2 rows belong
4038: to process 2, etc., where m0,m1,m2... are the input parameter 'm'.
4040: The DIAGONAL portion of the local submatrix of a processor can be defined
4041: as the submatrix which is obtained by extracting the part corresponding to
4042: the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
4043: first row that belongs to the processor, r2 is the last row belonging to
4044: this processor, and c1-c2 is the range of indices of the local part of a
4045: vector suitable for applying the matrix to. This is an mxn matrix. In the
4046: common case of a square matrix, the row and column ranges are the same and
4047: the DIAGONAL part is also square. The remaining portion of the local
4048: submatrix (m x (N-n)) constitutes the OFF-DIAGONAL portion.
4050: If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
4052: You can call MatGetInfo() to get information on how effective the preallocation was;
4053: for example, the fields mallocs, nz_allocated, nz_used, and nz_unneeded.
4054: You can also run with the option -info and look for messages with the string
4055: malloc in them to see if additional memory allocation was needed.
4057: Example usage:
4059: Consider the following 8x8 matrix with 34 non-zero values that is
4060: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4061: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4062: as follows:
4064: .vb
4065: 1 2 0 | 0 3 0 | 0 4
4066: Proc0 0 5 6 | 7 0 0 | 8 0
4067: 9 0 10 | 11 0 0 | 12 0
4068: -------------------------------------
4069: 13 0 14 | 15 16 17 | 0 0
4070: Proc1 0 18 0 | 19 20 21 | 0 0
4071: 0 0 0 | 22 23 0 | 24 0
4072: -------------------------------------
4073: Proc2 25 26 27 | 0 0 28 | 29 0
4074: 30 0 0 | 31 32 33 | 0 34
4075: .ve
4077: This can be represented as a collection of submatrices as:
4079: .vb
4080: A B C
4081: D E F
4082: G H I
4083: .ve
4085: Where the submatrices A,B,C are owned by proc0, D,E,F are
4086: owned by proc1, G,H,I are owned by proc2.
4088: The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4089: The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4090: The 'M','N' parameters are 8,8, and have the same values on all procs.
4092: The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4093: submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4094: corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4095: Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4096: part as SeqAIJ matrices, e.g., proc1 will store [E] as a SeqAIJ
4097: matrix, and [DF] as another SeqAIJ matrix.
4099: When d_nz, o_nz parameters are specified, d_nz storage elements are
4100: allocated for every row of the local diagonal submatrix, and o_nz
4101: storage locations are allocated for every row of the OFF-DIAGONAL submat.
4102: One way to choose d_nz and o_nz is to use the max nonzeros per local
4103: row for each of the local DIAGONAL and OFF-DIAGONAL submatrices.
4104: In this case, the values of d_nz,o_nz are:
4105: .vb
4106: proc0 : dnz = 2, o_nz = 2
4107: proc1 : dnz = 3, o_nz = 2
4108: proc2 : dnz = 1, o_nz = 4
4109: .ve
4110: We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4111: translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4112: for proc2, i.e., we are using 12+15+10=37 storage locations to store
4113: 34 values.
4115: When d_nnz, o_nnz parameters are specified, the storage is specified
4116: for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4117: In the above case the values for d_nnz,o_nnz are:
4118: .vb
4119: proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4120: proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4121: proc2: d_nnz = [1,1] and o_nnz = [4,4]
4122: .ve
4123: Here the space allocated is the sum of all the above values, i.e., 34, and
4124: hence the preallocation is perfect.
4126: Level: intermediate
4128: .keywords: matrix, aij, compressed row, sparse, parallel
4130: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateAIJ(), MatMPIAIJSetPreallocationCSR(),
4131: MATMPIAIJ, MatGetInfo(), PetscSplitOwnership()
4132: @*/
4133: PetscErrorCode MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
4134: {
4140: PetscTryMethod(B,"MatMPIAIJSetPreallocation_C",(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,d_nz,d_nnz,o_nz,o_nnz));
4141: return(0);
4142: }
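
/*
   A minimal usage sketch of MatMPIAIJSetPreallocation(), written as process 0 of the 8x8 example
   in the manpage above (3 local rows, d_nnz = [2,2,2], o_nnz = [2,2,2]). The helper name is an
   illustrative assumption only.
*/
static PetscErrorCode Sketch_Prealloc_Proc0(MPI_Comm comm,Mat *A)
{
  PetscInt d_nnz[3] = {2,2,2},o_nnz[3] = {2,2,2};

  MatCreate(comm,A);
  MatSetSizes(*A,3,3,8,8);
  MatSetType(*A,MATMPIAIJ);
  MatMPIAIJSetPreallocation(*A,0,d_nnz,0,o_nnz);
  /* follow with MatSetValues(), MatAssemblyBegin() and MatAssemblyEnd() to fill the matrix */
  return(0);
}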
4144: /*@
4145: MatCreateMPIAIJWithArrays - creates an MPI AIJ matrix using arrays that contain the local
4146: rows in standard CSR format.
4148: Collective on MPI_Comm
4150: Input Parameters:
4151: + comm - MPI communicator
4152: . m - number of local rows (Cannot be PETSC_DECIDE)
4153: . n - This value should be the same as the local size used in creating the
4154: x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
4155: calculated if N is given) For square matrices n is almost always m.
4156: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4157: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4158: . i - row indices; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
4159: . j - column indices
4160: - a - matrix values
4162: Output Parameter:
4163: . mat - the matrix
4165: Level: intermediate
4167: Notes:
4168: The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
4169: thus you CANNOT change the matrix entries by changing the values of a[] after you have
4170: called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
4172: The i and j indices are 0 based, and i indices are indices corresponding to the local j array.
4174: The format which is used for the sparse matrix input is equivalent to a
4175: row-major ordering, i.e., for the following matrix, the input data expected is
4176: as shown:
4178: $ 1 0 0
4179: $ 2 0 3 P0
4180: $ -------
4181: $ 4 5 6 P1
4182: $
4183: $ Process0 [P0]: rows_owned=[0,1]
4184: $ i = {0,1,3} [size = nrow+1 = 2+1]
4185: $ j = {0,0,2} [size = 3]
4186: $ v = {1,2,3} [size = 3]
4187: $
4188: $ Process1 [P1]: rows_owned=[2]
4189: $ i = {0,3} [size = nrow+1 = 1+1]
4190: $ j = {0,1,2} [size = 3]
4191: $ v = {4,5,6} [size = 3]
4193: .keywords: matrix, aij, compressed row, sparse, parallel
4195: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4196: MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays()
4197: @*/
4198: PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
4199: {
4203: if (i && i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4204: if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4205: MatCreate(comm,mat);
4206: MatSetSizes(*mat,m,n,M,N);
4207: /* MatSetBlockSizes(M,bs,cbs); */
4208: MatSetType(*mat,MATMPIAIJ);
4209: MatMPIAIJSetPreallocationCSR(*mat,i,j,a);
4210: return(0);
4211: }
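
/*
   A minimal usage sketch of MatCreateMPIAIJWithArrays(), written as process 0 of the 3x3 example
   in the manpage above (rows 0-1 local). The helper name is an illustrative assumption only.
   Since the i, j and a arrays are copied, they may be freed or reused after the call.
*/
static PetscErrorCode Sketch_CreateFromCSR_Proc0(MPI_Comm comm,Mat *A)
{
  PetscInt    i[] = {0,1,3};
  PetscInt    j[] = {0,0,2};
  PetscScalar a[] = {1,2,3};

  MatCreateMPIAIJWithArrays(comm,2,PETSC_DECIDE,PETSC_DETERMINE,3,i,j,a,A);
  return(0);
}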
4213: /*@C
4214: MatCreateAIJ - Creates a sparse parallel matrix in AIJ format
4215: (the default parallel PETSc format). For good matrix assembly performance
4216: the user should preallocate the matrix storage by setting the parameters
4217: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
4218: performance can be increased by more than a factor of 50.
4220: Collective on MPI_Comm
4222: Input Parameters:
4223: + comm - MPI communicator
4224: . m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
4225: This value should be the same as the local size used in creating the
4226: y vector for the matrix-vector product y = Ax.
4227: . n - This value should be the same as the local size used in creating the
4228: x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
4229: calculated if N is given) For square matrices n is almost always m.
4230: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4231: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4232: . d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
4233: (same value is used for all local rows)
4234: . d_nnz - array containing the number of nonzeros in the various rows of the
4235: DIAGONAL portion of the local submatrix (possibly different for each row)
4236: or NULL, if d_nz is used to specify the nonzero structure.
4237: The size of this array is equal to the number of local rows, i.e. 'm'.
4238: . o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
4239: submatrix (same value is used for all local rows).
4240: - o_nnz - array containing the number of nonzeros in the various rows of the
4241: OFF-DIAGONAL portion of the local submatrix (possibly different for
4242: each row) or NULL, if o_nz is used to specify the nonzero
4243: structure. The size of this array is equal to the number
4244: of local rows, i.e. 'm'.
4246: Output Parameter:
4247: . A - the matrix
4249: It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
4250: MatXXXXSetPreallocation() paradigm instead of this routine directly.
4251: [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
4253: Notes:
4254: If the *_nnz parameter is given then the *_nz parameter is ignored
4256: m,n,M,N parameters specify the size of the matrix, and its partitioning across
4257: processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
4258: storage requirements for this matrix.
4260: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
4261: processor then it must be used on all processors that share the object for
4262: that argument.
4264: The user MUST specify either the local or global matrix dimensions
4265: (possibly both).
4267: The parallel matrix is partitioned across processors such that the
4268: first m0 rows belong to process 0, the next m1 rows belong to
4269: process 1, the next m2 rows belong to process 2 etc.. where
4270: m0,m1,m2,... are the input parameter 'm', i.e., each processor stores
4271: values corresponding to an [m x N] submatrix.
4273: The columns are logically partitioned with the n0 columns belonging
4274: to 0th partition, the next n1 columns belonging to the next
4275: partition etc.. where n0,n1,n2... are the input parameter 'n'.
4277: The DIAGONAL portion of the local submatrix on any given processor
4278: is the submatrix corresponding to the rows and columns m,n
4279: corresponding to the given processor, i.e., the diagonal matrix on
4280: process 0 is [m0 x n0], the diagonal matrix on process 1 is [m1 x n1]
4281: etc. The remaining portion of the local submatrix [m x (N-n)]
4282: constitutes the OFF-DIAGONAL portion. The example below better
4283: illustrates this concept.
4285: For a square global matrix we define each processor's diagonal portion
4286: to be its local rows and the corresponding columns (a square submatrix);
4287: each processor's off-diagonal portion encompasses the remainder of the
4288: local matrix (a rectangular submatrix).
4290: If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
4292: When calling this routine with a single process communicator, a matrix of
4293: type SEQAIJ is returned. If a matrix of type MPIAIJ is desired for this
4294: type of communicator, use the construction mechanism
4295: .vb
4296: MatCreate(...,&A); MatSetType(A,MATMPIAIJ); MatSetSizes(A, m,n,M,N); MatMPIAIJSetPreallocation(A,...);
4297: .ve
4299: $ MatCreate(...,&A);
4300: $ MatSetType(A,MATMPIAIJ);
4301: $ MatSetSizes(A, m,n,M,N);
4302: $ MatMPIAIJSetPreallocation(A,...);
4304: By default, this format uses inodes (identical nodes) when possible.
4305: We search for consecutive rows with the same nonzero structure, thereby
4306: reusing matrix information to achieve increased efficiency.
4308: Options Database Keys:
4309: + -mat_no_inode - Do not use inodes
4310: - -mat_inode_limit <limit> - Sets inode limit (max limit=5)
4314: Example usage:
4316: Consider the following 8x8 matrix with 34 non-zero values that is
4317: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4318: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4319: as follows
4321: .vb
4322: 1 2 0 | 0 3 0 | 0 4
4323: Proc0 0 5 6 | 7 0 0 | 8 0
4324: 9 0 10 | 11 0 0 | 12 0
4325: -------------------------------------
4326: 13 0 14 | 15 16 17 | 0 0
4327: Proc1 0 18 0 | 19 20 21 | 0 0
4328: 0 0 0 | 22 23 0 | 24 0
4329: -------------------------------------
4330: Proc2 25 26 27 | 0 0 28 | 29 0
4331: 30 0 0 | 31 32 33 | 0 34
4332: .ve
4334: This can be represented as a collection of submatrices as
4336: .vb
4337: A B C
4338: D E F
4339: G H I
4340: .ve
4342: Where the submatrices A,B,C are owned by proc0, D,E,F are
4343: owned by proc1, G,H,I are owned by proc2.
4345: The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4346: The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4347: The 'M','N' parameters are 8,8, and have the same values on all procs.
4349: The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4350: submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4351: corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4352: Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4353: part as SeqAIJ matrices, e.g., proc1 will store [E] as a SeqAIJ
4354: matrix, and [DF] as another SeqAIJ matrix.
4356: When d_nz, o_nz parameters are specified, d_nz storage elements are
4357: allocated for every row of the local diagonal submatrix, and o_nz
4358: storage locations are allocated for every row of the OFF-DIAGONAL submat.
4359: One way to choose d_nz and o_nz is to use the max nonzeros per local
4360: row for each of the local DIAGONAL and OFF-DIAGONAL submatrices.
4361: In this case, the values of d_nz,o_nz are
4362: .vb
4363: proc0 : dnz = 2, o_nz = 2
4364: proc1 : dnz = 3, o_nz = 2
4365: proc2 : dnz = 1, o_nz = 4
4366: .ve
4367: We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4368: translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4369: for proc2, i.e., we are using 12+15+10=37 storage locations to store
4370: 34 values.
4372: When d_nnz, o_nnz parameters are specified, the storage is specified
4373: for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4374: In the above case the values for d_nnz,o_nnz are
4375: .vb
4376: proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4377: proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4378: proc2: d_nnz = [1,1] and o_nnz = [4,4]
4379: .ve
4380: Here the space allocated is the sum of all the above values, i.e., 34, and
4381: hence the preallocation is perfect.
4383: Level: intermediate
4385: .keywords: matrix, aij, compressed row, sparse, parallel
4387: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4388: MATMPIAIJ, MatCreateMPIAIJWithArrays()
4389: @*/
4390: PetscErrorCode MatCreateAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
4391: {
4393: PetscMPIInt size;
4396: MatCreate(comm,A);
4397: MatSetSizes(*A,m,n,M,N);
4398: MPI_Comm_size(comm,&size);
4399: if (size > 1) {
4400: MatSetType(*A,MATMPIAIJ);
4401: MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
4402: } else {
4403: MatSetType(*A,MATSEQAIJ);
4404: MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
4405: }
4406: return(0);
4407: }
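
/*
   A minimal usage sketch of MatCreateAIJ(), written as process 0 of the 8x8 example in the
   manpage above (3 local rows and columns, d_nz = 2, o_nz = 2). The helper name is an
   illustrative assumption only; as noted above, the MatCreate()/MatSetType()/
   MatMPIAIJSetPreallocation() sequence is the recommended way to build such a matrix.
*/
static PetscErrorCode Sketch_CreateAIJ_Proc0(MPI_Comm comm,Mat *A)
{
  MatCreateAIJ(comm,3,3,8,8,2,NULL,2,NULL,A);
  /* insert entries with MatSetValues(), then call MatAssemblyBegin()/MatAssemblyEnd() */
  return(0);
}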
4409: PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
4410: {
4411: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
4412: PetscBool flg;
4416: PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&flg);
4417: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"This function requires a MATMPIAIJ matrix as input");
4418: if (Ad) *Ad = a->A;
4419: if (Ao) *Ao = a->B;
4420: if (colmap) *colmap = a->garray;
4421: return(0);
4422: }
4424: PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
4425: {
4427: PetscInt m,N,i,rstart,nnz,Ii;
4428: PetscInt *indx;
4429: PetscScalar *values;
4432: MatGetSize(inmat,&m,&N);
4433: if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
4434: PetscInt *dnz,*onz,sum,bs,cbs;
4436: if (n == PETSC_DECIDE) {
4437: PetscSplitOwnership(comm,&n,&N);
4438: }
4439: /* Check sum(n) = N */
4440: MPIU_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);
4441: if (sum != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Sum of local columns %D != global columns %D",sum,N);
4443: MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);
4444: rstart -= m;
4446: MatPreallocateInitialize(comm,m,n,dnz,onz);
4447: for (i=0; i<m; i++) {
4448: MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4449: MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);
4450: MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4451: }
4453: MatCreate(comm,outmat);
4454: MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4455: MatGetBlockSizes(inmat,&bs,&cbs);
4456: MatSetBlockSizes(*outmat,bs,cbs);
4457: MatSetType(*outmat,MATAIJ);
4458: MatSeqAIJSetPreallocation(*outmat,0,dnz);
4459: MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);
4460: MatPreallocateFinalize(dnz,onz);
4461: }
4463: /* numeric phase */
4464: MatGetOwnershipRange(*outmat,&rstart,NULL);
4465: for (i=0; i<m; i++) {
4466: MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4467: Ii = i + rstart;
4468: MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);
4469: MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4470: }
4471: MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);
4472: MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);
4473: return(0);
4474: }
4476: PetscErrorCode MatFileSplit(Mat A,char *outfile)
4477: {
4478: PetscErrorCode ierr;
4479: PetscMPIInt rank;
4480: PetscInt m,N,i,rstart,nnz;
4481: size_t len;
4482: const PetscInt *indx;
4483: PetscViewer out;
4484: char *name;
4485: Mat B;
4486: const PetscScalar *values;
4489: MatGetLocalSize(A,&m,0);
4490: MatGetSize(A,0,&N);
4491: /* Should this be the type of the diagonal block of A? */
4492: MatCreate(PETSC_COMM_SELF,&B);
4493: MatSetSizes(B,m,N,m,N);
4494: MatSetBlockSizesFromMats(B,A,A);
4495: MatSetType(B,MATSEQAIJ);
4496: MatSeqAIJSetPreallocation(B,0,NULL);
4497: MatGetOwnershipRange(A,&rstart,0);
4498: for (i=0; i<m; i++) {
4499: MatGetRow(A,i+rstart,&nnz,&indx,&values);
4500: MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);
4501: MatRestoreRow(A,i+rstart,&nnz,&indx,&values);
4502: }
4503: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
4504: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
4506: MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);
4507: PetscStrlen(outfile,&len);
4508: PetscMalloc1(len+5,&name);
4509: sprintf(name,"%s.%d",outfile,rank);
4510: PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);
4511: PetscFree(name);
4512: MatView(B,out);
4513: PetscViewerDestroy(&out);
4514: MatDestroy(&B);
4515: return(0);
4516: }
4518: PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(Mat A)
4519: {
4520: PetscErrorCode ierr;
4521: Mat_Merge_SeqsToMPI *merge;
4522: PetscContainer container;
4525: PetscObjectQuery((PetscObject)A,"MatMergeSeqsToMPI",(PetscObject*)&container);
4526: if (container) {
4527: PetscContainerGetPointer(container,(void**)&merge);
4528: PetscFree(merge->id_r);
4529: PetscFree(merge->len_s);
4530: PetscFree(merge->len_r);
4531: PetscFree(merge->bi);
4532: PetscFree(merge->bj);
4533: PetscFree(merge->buf_ri[0]);
4534: PetscFree(merge->buf_ri);
4535: PetscFree(merge->buf_rj[0]);
4536: PetscFree(merge->buf_rj);
4537: PetscFree(merge->coi);
4538: PetscFree(merge->coj);
4539: PetscFree(merge->owners_co);
4540: PetscLayoutDestroy(&merge->rowmap);
4541: PetscFree(merge);
4542: PetscObjectCompose((PetscObject)A,"MatMergeSeqsToMPI",0);
4543: }
4544: MatDestroy_MPIAIJ(A);
4545: return(0);
4546: }
4548: #include <../src/mat/utils/freespace.h>
4549: #include <petscbt.h>
4551: PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat,Mat mpimat)
4552: {
4553: PetscErrorCode ierr;
4554: MPI_Comm comm;
4555: Mat_SeqAIJ *a =(Mat_SeqAIJ*)seqmat->data;
4556: PetscMPIInt size,rank,taga,*len_s;
4557: PetscInt N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj;
4558: PetscInt proc,m;
4559: PetscInt **buf_ri,**buf_rj;
4560: PetscInt k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
4561: PetscInt nrows,**buf_ri_k,**nextrow,**nextai;
4562: MPI_Request *s_waits,*r_waits;
4563: MPI_Status *status;
4564: MatScalar *aa=a->a;
4565: MatScalar **abuf_r,*ba_i;
4566: Mat_Merge_SeqsToMPI *merge;
4567: PetscContainer container;
4570: PetscObjectGetComm((PetscObject)mpimat,&comm);
4571: PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);
4573: MPI_Comm_size(comm,&size);
4574: MPI_Comm_rank(comm,&rank);
4576: PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject*)&container);
4577: PetscContainerGetPointer(container,(void**)&merge);
4579: bi = merge->bi;
4580: bj = merge->bj;
4581: buf_ri = merge->buf_ri;
4582: buf_rj = merge->buf_rj;
4584: PetscMalloc1(size,&status);
4585: owners = merge->rowmap->range;
4586: len_s = merge->len_s;
4588: /* send and recv matrix values */
4589: /*-----------------------------*/
4590: PetscObjectGetNewTag((PetscObject)mpimat,&taga);
4591: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
4593: PetscMalloc1(merge->nsend+1,&s_waits);
4594: for (proc=0,k=0; proc<size; proc++) {
4595: if (!len_s[proc]) continue;
4596: i = owners[proc];
4597: MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
4598: k++;
4599: }
4601: if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
4602: if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
4603: PetscFree(status);
4605: PetscFree(s_waits);
4606: PetscFree(r_waits);
4608: /* insert mat values of mpimat */
4609: /*----------------------------*/
4610: PetscMalloc1(N,&ba_i);
4611: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);
4613: for (k=0; k<merge->nrecv; k++) {
4614: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
4615: nrows = *(buf_ri_k[k]);
4616: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th received i-structure */
4617: nextai[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
4618: }
4620: /* set values of ba */
4621: m = merge->rowmap->n;
4622: for (i=0; i<m; i++) {
4623: arow = owners[rank] + i;
4624: bj_i = bj+bi[i]; /* col indices of the i-th row of mpimat */
4625: bnzi = bi[i+1] - bi[i];
4626: PetscMemzero(ba_i,bnzi*sizeof(PetscScalar));
4628: /* add local non-zero vals of this proc's seqmat into ba */
4629: anzi = ai[arow+1] - ai[arow];
4630: aj = a->j + ai[arow];
4631: aa = a->a + ai[arow];
4632: nextaj = 0;
4633: for (j=0; nextaj<anzi; j++) {
4634: if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4635: ba_i[j] += aa[nextaj++];
4636: }
4637: }
4639: /* add received vals into ba */
4640: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4641: /* i-th row */
4642: if (i == *nextrow[k]) {
4643: anzi = *(nextai[k]+1) - *nextai[k];
4644: aj = buf_rj[k] + *(nextai[k]);
4645: aa = abuf_r[k] + *(nextai[k]);
4646: nextaj = 0;
4647: for (j=0; nextaj<anzi; j++) {
4648: if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4649: ba_i[j] += aa[nextaj++];
4650: }
4651: }
4652: nextrow[k]++; nextai[k]++;
4653: }
4654: }
4655: MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);
4656: }
4657: MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);
4658: MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);
4660: PetscFree(abuf_r[0]);
4661: PetscFree(abuf_r);
4662: PetscFree(ba_i);
4663: PetscFree3(buf_ri_k,nextrow,nextai);
4664: PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);
4665: return(0);
4666: }
4668: PetscErrorCode MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4669: {
4670: PetscErrorCode ierr;
4671: Mat B_mpi;
4672: Mat_SeqAIJ *a=(Mat_SeqAIJ*)seqmat->data;
4673: PetscMPIInt size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4674: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
4675: PetscInt M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4676: PetscInt len,proc,*dnz,*onz,bs,cbs;
4677: PetscInt k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4678: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4679: MPI_Request *si_waits,*sj_waits,*ri_waits,*rj_waits;
4680: MPI_Status *status;
4681: PetscFreeSpaceList free_space=NULL,current_space=NULL;
4682: PetscBT lnkbt;
4683: Mat_Merge_SeqsToMPI *merge;
4684: PetscContainer container;
4687: PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);
4689: /* make sure it is a PETSc comm */
4690: PetscCommDuplicate(comm,&comm,NULL);
4691: MPI_Comm_size(comm,&size);
4692: MPI_Comm_rank(comm,&rank);
4694: PetscNew(&merge);
4695: PetscMalloc1(size,&status);
4697: /* determine row ownership */
4698: /*---------------------------------------------------------*/
4699: PetscLayoutCreate(comm,&merge->rowmap);
4700: PetscLayoutSetLocalSize(merge->rowmap,m);
4701: PetscLayoutSetSize(merge->rowmap,M);
4702: PetscLayoutSetBlockSize(merge->rowmap,1);
4703: PetscLayoutSetUp(merge->rowmap);
4704: PetscMalloc1(size,&len_si);
4705: PetscMalloc1(size,&merge->len_s);
4707: m = merge->rowmap->n;
4708: owners = merge->rowmap->range;
4710: /* determine the number of messages to send, their lengths */
4711: /*---------------------------------------------------------*/
4712: len_s = merge->len_s;
4714: len = 0; /* length of buf_si[] */
4715: merge->nsend = 0;
4716: for (proc=0; proc<size; proc++) {
4717: len_si[proc] = 0;
4718: if (proc == rank) {
4719: len_s[proc] = 0;
4720: } else {
4721: len_si[proc] = owners[proc+1] - owners[proc] + 1;
4722: len_s[proc] = ai[owners[proc+1]] - ai[owners[proc]]; /* num of rows to be sent to [proc] */
4723: }
4724: if (len_s[proc]) {
4725: merge->nsend++;
4726: nrows = 0;
4727: for (i=owners[proc]; i<owners[proc+1]; i++) {
4728: if (ai[i+1] > ai[i]) nrows++;
4729: }
4730: len_si[proc] = 2*(nrows+1);
4731: len += len_si[proc];
4732: }
4733: }
4735: /* determine the number and length of messages to receive for ij-structure */
4736: /*-------------------------------------------------------------------------*/
4737: PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
4738: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
4740: /* post the Irecv of j-structure */
4741: /*-------------------------------*/
4742: PetscCommGetNewTag(comm,&tagj);
4743: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);
4745: /* post the Isend of j-structure */
4746: /*--------------------------------*/
4747: PetscMalloc2(merge->nsend,&si_waits,merge->nsend,&sj_waits);
4749: for (proc=0, k=0; proc<size; proc++) {
4750: if (!len_s[proc]) continue;
4751: i = owners[proc];
4752: MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);
4753: k++;
4754: }
4756: /* receives and sends of j-structure are complete */
4757: /*------------------------------------------------*/
4758: if (merge->nrecv) {MPI_Waitall(merge->nrecv,rj_waits,status);}
4759: if (merge->nsend) {MPI_Waitall(merge->nsend,sj_waits,status);}
4761: /* send and recv i-structure */
4762: /*---------------------------*/
4763: PetscCommGetNewTag(comm,&tagi);
4764: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);
4766: PetscMalloc1(len+1,&buf_s);
4767: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
4768: for (proc=0,k=0; proc<size; proc++) {
4769: if (!len_s[proc]) continue;
4770: /* form outgoing message for i-structure:
4771: buf_si[0]: nrows to be sent
4772: [1:nrows]: row index (global)
4773: [nrows+1:2*nrows+1]: i-structure index
4774: */
4775: /*-------------------------------------------*/
4776: nrows = len_si[proc]/2 - 1;
4777: buf_si_i = buf_si + nrows+1;
4778: buf_si[0] = nrows;
4779: buf_si_i[0] = 0;
4780: nrows = 0;
4781: for (i=owners[proc]; i<owners[proc+1]; i++) {
4782: anzi = ai[i+1] - ai[i];
4783: if (anzi) {
4784: buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4785: buf_si[nrows+1] = i-owners[proc]; /* local row index */
4786: nrows++;
4787: }
4788: }
4789: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);
4790: k++;
4791: buf_si += len_si[proc];
4792: }
4794: if (merge->nrecv) {MPI_Waitall(merge->nrecv,ri_waits,status);}
4795: if (merge->nsend) {MPI_Waitall(merge->nsend,si_waits,status);}
4797: PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);
4798: for (i=0; i<merge->nrecv; i++) {
4799: PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);
4800: }
4802: PetscFree(len_si);
4803: PetscFree(len_ri);
4804: PetscFree(rj_waits);
4805: PetscFree2(si_waits,sj_waits);
4806: PetscFree(ri_waits);
4807: PetscFree(buf_s);
4808: PetscFree(status);
4810: /* compute a local seq matrix in each processor */
4811: /*----------------------------------------------*/
4812: /* allocate bi array and free space for accumulating nonzero column info */
4813: PetscMalloc1(m+1,&bi);
4814: bi[0] = 0;
4816: /* create and initialize a linked list */
4817: nlnk = N+1;
4818: PetscLLCreate(N,N,nlnk,lnk,lnkbt);
4820: /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4821: len = ai[owners[rank+1]] - ai[owners[rank]];
4822: PetscFreeSpaceGet(PetscIntMultTruncate(2,len)+1,&free_space);
4824: current_space = free_space;
4826: /* determine symbolic info for each local row */
4827: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);
4829: for (k=0; k<merge->nrecv; k++) {
4830: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
4831: nrows = *buf_ri_k[k];
4832: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th received i-structure */
4833: nextai[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
4834: }
4836: MatPreallocateInitialize(comm,m,n,dnz,onz);
4837: len = 0;
4838: for (i=0; i<m; i++) {
4839: bnzi = 0;
4840: /* add local non-zero cols of this proc's seqmat into lnk */
4841: arow = owners[rank] + i;
4842: anzi = ai[arow+1] - ai[arow];
4843: aj = a->j + ai[arow];
4844: PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4845: bnzi += nlnk;
4846: /* add received col data into lnk */
4847: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4848: if (i == *nextrow[k]) { /* i-th row */
4849: anzi = *(nextai[k]+1) - *nextai[k];
4850: aj = buf_rj[k] + *nextai[k];
4851: PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4852: bnzi += nlnk;
4853: nextrow[k]++; nextai[k]++;
4854: }
4855: }
4856: if (len < bnzi) len = bnzi; /* =max(bnzi) */
4858: /* if free space is not available, make more free space */
4859: if (current_space->local_remaining<bnzi) {
4860: PetscFreeSpaceGet(PetscIntSumTruncate(bnzi,current_space->total_array_size),&current_space);
4861: nspacedouble++;
4862: }
4863: /* copy data into free space, then initialize lnk */
4864: PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);
4865: MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);
4867: current_space->array += bnzi;
4868: current_space->local_used += bnzi;
4869: current_space->local_remaining -= bnzi;
4871: bi[i+1] = bi[i] + bnzi;
4872: }
4874: PetscFree3(buf_ri_k,nextrow,nextai);
4876: PetscMalloc1(bi[m]+1,&bj);
4877: PetscFreeSpaceContiguous(&free_space,bj);
4878: PetscLLDestroy(lnk,lnkbt);
4880: /* create symbolic parallel matrix B_mpi */
4881: /*---------------------------------------*/
4882: MatGetBlockSizes(seqmat,&bs,&cbs);
4883: MatCreate(comm,&B_mpi);
4884: if (n==PETSC_DECIDE) {
4885: MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);
4886: } else {
4887: MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4888: }
4889: MatSetBlockSizes(B_mpi,bs,cbs);
4890: MatSetType(B_mpi,MATMPIAIJ);
4891: MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);
4892: MatPreallocateFinalize(dnz,onz);
4893: MatSetOption(B_mpi,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);
4895: /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
4896: B_mpi->assembled = PETSC_FALSE;
4897: B_mpi->ops->destroy = MatDestroy_MPIAIJ_SeqsToMPI;
4898: merge->bi = bi;
4899: merge->bj = bj;
4900: merge->buf_ri = buf_ri;
4901: merge->buf_rj = buf_rj;
4902: merge->coi = NULL;
4903: merge->coj = NULL;
4904: merge->owners_co = NULL;
4906: PetscCommDestroy(&comm);
4908: /* attach the supporting struct to B_mpi for reuse */
4909: PetscContainerCreate(PETSC_COMM_SELF,&container);
4910: PetscContainerSetPointer(container,merge);
4911: PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);
4912: PetscContainerDestroy(&container);
4913: *mpimat = B_mpi;
4915: PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);
4916: return(0);
4917: }
4919: /*@C
4920: MatCreateMPIAIJSumSeqAIJ - Creates a MATMPIAIJ matrix by adding sequential
4921: matrices from each processor
4923: Collective on MPI_Comm
4925: Input Parameters:
4926: + comm - the communicator the parallel matrix will live on
4927: . seqmat - the input sequential matrix on each process
4928: . m - number of local rows (or PETSC_DECIDE)
4929: . n - number of local columns (or PETSC_DECIDE)
4930: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4932: Output Parameter:
4933: . mpimat - the parallel matrix generated
4935: Level: advanced
4937: Notes:
4938: The dimensions of the sequential matrix in each processor MUST be the same.
4939: The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
4940: destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
4941: @*/
4942: PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
4943: {
4945: PetscMPIInt size;
4948: MPI_Comm_size(comm,&size);
4949: if (size == 1) {
4950: PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4951: if (scall == MAT_INITIAL_MATRIX) {
4952: MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);
4953: } else {
4954: MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);
4955: }
4956: PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4957: return(0);
4958: }
4959: PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4960: if (scall == MAT_INITIAL_MATRIX) {
4961: MatCreateMPIAIJSumSeqAIJSymbolic(comm,seqmat,m,n,mpimat);
4962: }
4963: MatCreateMPIAIJSumSeqAIJNumeric(seqmat,*mpimat);
4964: PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4965: return(0);
4966: }
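/*
   A minimal usage sketch (not part of the source): every rank builds a sequential
   AIJ matrix of the same global dimensions and the pieces are summed into one
   parallel MATMPIAIJ matrix; the sizes, nonzero counts, and values below are
   illustrative assumptions only.

     Mat            seqA,C;
     PetscErrorCode ierr;

     ierr = MatCreateSeqAIJ(PETSC_COMM_SELF,10,10,3,NULL,&seqA);CHKERRQ(ierr);
     // ... MatSetValues()/MatAssemblyBegin()/MatAssemblyEnd() on seqA ...
     ierr = MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD,seqA,PETSC_DECIDE,PETSC_DECIDE,MAT_INITIAL_MATRIX,&C);CHKERRQ(ierr);
     // after changing only the numerical values of seqA (same nonzero pattern),
     // the parallel structure attached to C can be reused
     ierr = MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD,seqA,PETSC_DECIDE,PETSC_DECIDE,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
*/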
4968: /*@
4969: MatMPIAIJGetLocalMat - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
4970: mlocal rows and n columns, where mlocal is the local row count obtained with MatGetLocalSize() and n is the global column count obtained
4971: with MatGetSize()
4973: Not Collective
4975: Input Parameters:
4976: + A - the matrix
4977: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4979: Output Parameter:
4980: . A_loc - the local sequential matrix generated
4982: Level: developer
4984: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMatCondensed()
4986: @*/
4987: PetscErrorCode MatMPIAIJGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
4988: {
4990: Mat_MPIAIJ *mpimat=(Mat_MPIAIJ*)A->data;
4991: Mat_SeqAIJ *mat,*a,*b;
4992: PetscInt *ai,*aj,*bi,*bj,*cmap=mpimat->garray;
4993: MatScalar *aa,*ba,*cam;
4994: PetscScalar *ca;
4995: PetscInt am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
4996: PetscInt *ci,*cj,col,ncols_d,ncols_o,jo;
4997: PetscBool match;
4998: MPI_Comm comm;
4999: PetscMPIInt size;
5002: PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&match);
5003: if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
5004: PetscObjectGetComm((PetscObject)A,&comm);
5005: MPI_Comm_size(comm,&size);
5006: if (size == 1 && scall == MAT_REUSE_MATRIX) return(0);
5008: PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
5009: a = (Mat_SeqAIJ*)(mpimat->A)->data;
5010: b = (Mat_SeqAIJ*)(mpimat->B)->data;
5011: ai = a->i; aj = a->j; bi = b->i; bj = b->j;
5012: aa = a->a; ba = b->a;
5013: if (scall == MAT_INITIAL_MATRIX) {
5014: if (size == 1) {
5015: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ai,aj,aa,A_loc);
5016: return(0);
5017: }
5019: PetscMalloc1(1+am,&ci);
5020: ci[0] = 0;
5021: for (i=0; i<am; i++) {
5022: ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
5023: }
5024: PetscMalloc1(1+ci[am],&cj);
5025: PetscMalloc1(1+ci[am],&ca);
5026: k = 0;
5027: for (i=0; i<am; i++) {
5028: ncols_o = bi[i+1] - bi[i];
5029: ncols_d = ai[i+1] - ai[i];
5030: /* off-diagonal portion of A */
5031: for (jo=0; jo<ncols_o; jo++) {
5032: col = cmap[*bj];
5033: if (col >= cstart) break;
5034: cj[k] = col; bj++;
5035: ca[k++] = *ba++;
5036: }
5037: /* diagonal portion of A */
5038: for (j=0; j<ncols_d; j++) {
5039: cj[k] = cstart + *aj++;
5040: ca[k++] = *aa++;
5041: }
5042: /* off-diagonal portion of A */
5043: for (j=jo; j<ncols_o; j++) {
5044: cj[k] = cmap[*bj++];
5045: ca[k++] = *ba++;
5046: }
5047: }
5048: /* put together the new matrix */
5049: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);
5050: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5051: /* Since these are PETSc arrays, change flags to free them as necessary. */
5052: mat = (Mat_SeqAIJ*)(*A_loc)->data;
5053: mat->free_a = PETSC_TRUE;
5054: mat->free_ij = PETSC_TRUE;
5055: mat->nonew = 0;
5056: } else if (scall == MAT_REUSE_MATRIX) {
5057: mat=(Mat_SeqAIJ*)(*A_loc)->data;
5058: ci = mat->i; cj = mat->j; cam = mat->a;
5059: for (i=0; i<am; i++) {
5060: /* off-diagonal portion of A */
5061: ncols_o = bi[i+1] - bi[i];
5062: for (jo=0; jo<ncols_o; jo++) {
5063: col = cmap[*bj];
5064: if (col >= cstart) break;
5065: *cam++ = *ba++; bj++;
5066: }
5067: /* diagonal portion of A */
5068: ncols_d = ai[i+1] - ai[i];
5069: for (j=0; j<ncols_d; j++) *cam++ = *aa++;
5070: /* off-diagonal portion of A */
5071: for (j=jo; j<ncols_o; j++) {
5072: *cam++ = *ba++; bj++;
5073: }
5074: }
5075: } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
5076: PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
5077: return(0);
5078: }
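/*
   A minimal usage sketch (not part of the source): extract the local rows of a
   MATMPIAIJ matrix A into a sequential matrix, then refresh only the values with
   MAT_REUSE_MATRIX once the nonzero pattern of A is unchanged.  A is assumed to
   be an assembled MATMPIAIJ matrix.

     Mat A_loc;

     ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr);
     // ... use A_loc; later, after the values (not the pattern) of A change ...
     ierr = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr);
     ierr = MatDestroy(&A_loc);CHKERRQ(ierr);
*/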
5080: /*@C
5081: MatMPIAIJGetLocalMatCondensed - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and NON-ZERO columns
5083: Not Collective
5085: Input Parameters:
5086: + A - the matrix
5087: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5088: - row, col - index sets of rows and columns to extract (or NULL)
5090: Output Parameter:
5091: . A_loc - the local sequential matrix generated
5093: Level: developer
5095: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat()
5097: @*/
5098: PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
5099: {
5100: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
5102: PetscInt i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
5103: IS isrowa,iscola;
5104: Mat *aloc;
5105: PetscBool match;
5108: PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);
5109: if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
5110: PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);
5111: if (!row) {
5112: start = A->rmap->rstart; end = A->rmap->rend;
5113: ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);
5114: } else {
5115: isrowa = *row;
5116: }
5117: if (!col) {
5118: start = A->cmap->rstart;
5119: cmap = a->garray;
5120: nzA = a->A->cmap->n;
5121: nzB = a->B->cmap->n;
5122: PetscMalloc1(nzA+nzB, &idx);
5123: ncols = 0;
5124: for (i=0; i<nzB; i++) {
5125: if (cmap[i] < start) idx[ncols++] = cmap[i];
5126: else break;
5127: }
5128: imark = i;
5129: for (i=0; i<nzA; i++) idx[ncols++] = start + i;
5130: for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
5131: ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&iscola);
5132: } else {
5133: iscola = *col;
5134: }
5135: if (scall != MAT_INITIAL_MATRIX) {
5136: PetscMalloc1(1,&aloc);
5137: aloc[0] = *A_loc;
5138: }
5139: MatCreateSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);
5140: if (!col) { /* attach global id of condensed columns */
5141: PetscObjectCompose((PetscObject)aloc[0],"_petsc_GetLocalMatCondensed_iscol",(PetscObject)iscola);
5142: }
5143: *A_loc = aloc[0];
5144: PetscFree(aloc);
5145: if (!row) {
5146: ISDestroy(&isrowa);
5147: }
5148: if (!col) {
5149: ISDestroy(&iscola);
5150: }
5151: PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);
5152: return(0);
5153: }
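/*
   A minimal usage sketch (not part of the source): gather the local rows of A
   restricted to its nonzero columns; passing NULL for row and col lets the
   routine build (and attach) the index sets itself.  A is assumed to be an
   assembled MATMPIAIJ matrix.

     Mat A_loc;

     ierr = MatMPIAIJGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,NULL,NULL,&A_loc);CHKERRQ(ierr);
     // ... use A_loc ...
     ierr = MatDestroy(&A_loc);CHKERRQ(ierr);
*/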
5155: /*@C
5156: MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of the local part of A
5158: Collective on Mat
5160: Input Parameters:
5161: + A,B - the matrices in mpiaij format
5162: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5163: - rowb, colb - index sets of rows and columns of B to extract (or NULL)
5165: Output Parameter:
5166: + rowb, colb - index sets of rows and columns of B to extract
5167: - B_seq - the sequential matrix generated
5169: Level: developer
5171: @*/
5172: PetscErrorCode MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,Mat *B_seq)
5173: {
5174: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
5176: PetscInt *idx,i,start,ncols,nzA,nzB,*cmap,imark;
5177: IS isrowb,iscolb;
5178: Mat *bseq=NULL;
5181: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5182: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5183: }
5184: PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);
5186: if (scall == MAT_INITIAL_MATRIX) {
5187: start = A->cmap->rstart;
5188: cmap = a->garray;
5189: nzA = a->A->cmap->n;
5190: nzB = a->B->cmap->n;
5191: PetscMalloc1(nzA+nzB, &idx);
5192: ncols = 0;
5193: for (i=0; i<nzB; i++) { /* row < local row index */
5194: if (cmap[i] < start) idx[ncols++] = cmap[i];
5195: else break;
5196: }
5197: imark = i;
5198: for (i=0; i<nzA; i++) idx[ncols++] = start + i; /* local rows */
5199: for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
5200: ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&isrowb);
5201: ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);
5202: } else {
5203: if (!rowb || !colb) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
5204: isrowb = *rowb; iscolb = *colb;
5205: PetscMalloc1(1,&bseq);
5206: bseq[0] = *B_seq;
5207: }
5208: MatCreateSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);
5209: *B_seq = bseq[0];
5210: PetscFree(bseq);
5211: if (!rowb) {
5212: ISDestroy(&isrowb);
5213: } else {
5214: *rowb = isrowb;
5215: }
5216: if (!colb) {
5217: ISDestroy(&iscolb);
5218: } else {
5219: *colb = iscolb;
5220: }
5221: PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);
5222: return(0);
5223: }
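/*
   A minimal usage sketch (not part of the source): fetch the rows of B matching
   the nonzero columns of the local part of A; the index sets returned by the
   first call are passed back for a MAT_REUSE_MATRIX call.  A and B are assumed
   to be compatible MATMPIAIJ matrices.

     IS  rowb = NULL,colb = NULL;
     Mat B_seq;

     ierr = MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&rowb,&colb,&B_seq);CHKERRQ(ierr);
     // ... values of B change, the nonzero pattern does not ...
     ierr = MatGetBrowsOfAcols(A,B,MAT_REUSE_MATRIX,&rowb,&colb,&B_seq);CHKERRQ(ierr);
     ierr = ISDestroy(&rowb);CHKERRQ(ierr);
     ierr = ISDestroy(&colb);CHKERRQ(ierr);
     ierr = MatDestroy(&B_seq);CHKERRQ(ierr);
*/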
5225: /*
5226: MatGetBrowsOfAoCols_MPIAIJ - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns
5227: of the OFF-DIAGONAL portion of local A
5229: Collective on Mat
5231: Input Parameters:
5232: + A,B - the matrices in mpiaij format
5233: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5235: Output Parameter:
5236: + startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
5237: . startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
5238: . bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
5239: - B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N
5241: Developer Notes: This directly accesses information inside the VecScatter associated with the matrix-vector product
5242: for this matrix. This is not desirable.
5244: Level: developer
5246: */
5247: PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscInt **startsj_s,PetscInt **startsj_r,MatScalar **bufa_ptr,Mat *B_oth)
5248: {
5249: PetscErrorCode ierr;
5250: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
5251: Mat_SeqAIJ *b_oth;
5252: VecScatter ctx;
5253: MPI_Comm comm;
5254: const PetscMPIInt *rprocs,*sprocs;
5255: const PetscInt *srow,*rstarts,*sstarts;
5256: PetscInt *rowlen,*bufj,*bufJ,ncols,aBn=a->B->cmap->n,row,*b_othi,*b_othj,*rvalues=NULL,*svalues=NULL,*cols,sbs,rbs;
5257: PetscInt i,j,k=0,l,ll,nrecvs,nsends,nrows,*rstartsj = 0,*sstartsj,len;
5258: PetscScalar *b_otha,*bufa,*bufA,*vals;
5259: MPI_Request *rwaits = NULL,*swaits = NULL;
5260: MPI_Status rstatus;
5261: PetscMPIInt jj,size,tag,rank,nsends_mpi,nrecvs_mpi;
5264: PetscObjectGetComm((PetscObject)A,&comm);
5265: MPI_Comm_size(comm,&size);
5267: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5268: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%d, %d) != (%d,%d)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5269: }
5270: PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);
5271: MPI_Comm_rank(comm,&rank);
5273: if (size == 1) {
5274: if (startsj_s) *startsj_s = NULL;
5275: if (bufa_ptr) *bufa_ptr = NULL;
5276: *B_oth = NULL;
5277: return(0);
5278: }
5280: ctx = a->Mvctx;
5281: tag = ((PetscObject)ctx)->tag;
5283: if (ctx->inuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE," Scatter ctx already in use");
5284: VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&srow,&sprocs,&sbs);
5285: /* rprocs[] must be ordered so that indices received from them are ordered in rvalues[], which is key to algorithms used in this subroutine */
5286: VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL/*indices not needed*/,&rprocs,&rbs);
5287: PetscMPIIntCast(nsends,&nsends_mpi);
5288: PetscMPIIntCast(nrecvs,&nrecvs_mpi);
5289: PetscMalloc2(nrecvs,&rwaits,nsends,&swaits);
5291: if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
5292: if (scall == MAT_INITIAL_MATRIX) {
5293: /* i-array */
5294: /*---------*/
5295: /* post receives */
5296: if (nrecvs) {PetscMalloc1(rbs*(rstarts[nrecvs] - rstarts[0]),&rvalues);} /* rstarts can be NULL when nrecvs=0 */
5297: for (i=0; i<nrecvs; i++) {
5298: rowlen = rvalues + rstarts[i]*rbs;
5299: nrows = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
5300: MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5301: }
5303: /* pack the outgoing message */
5304: PetscMalloc2(nsends+1,&sstartsj,nrecvs+1,&rstartsj);
5306: sstartsj[0] = 0;
5307: rstartsj[0] = 0;
5308: len = 0; /* total length of j or a array to be sent */
5309: if (nsends) {
5310: k = sstarts[0]; /* ATTENTION: sstarts[0] and rstarts[0] are not necessarily zero */
5311: PetscMalloc1(sbs*(sstarts[nsends]-sstarts[0]),&svalues);
5312: }
5313: for (i=0; i<nsends; i++) {
5314: rowlen = svalues + (sstarts[i]-sstarts[0])*sbs;
5315: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5316: for (j=0; j<nrows; j++) {
5317: row = srow[k] + B->rmap->range[rank]; /* global row idx */
5318: for (l=0; l<sbs; l++) {
5319: MatGetRow_MPIAIJ(B,row+l,&ncols,NULL,NULL); /* rowlength */
5321: rowlen[j*sbs+l] = ncols;
5323: len += ncols;
5324: MatRestoreRow_MPIAIJ(B,row+l,&ncols,NULL,NULL);
5325: }
5326: k++;
5327: }
5328: MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);
5330: sstartsj[i+1] = len; /* starting point of (i+1)-th outgoing msg in bufj and bufa */
5331: }
5332: /* recvs and sends of i-array are completed */
5333: i = nrecvs;
5334: while (i--) {
5335: MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5336: }
5337: if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5338: PetscFree(svalues);
5340: /* allocate buffers for sending j and a arrays */
5341: PetscMalloc1(len+1,&bufj);
5342: PetscMalloc1(len+1,&bufa);
5344: /* create i-array of B_oth */
5345: PetscMalloc1(aBn+2,&b_othi);
5347: b_othi[0] = 0;
5348: len = 0; /* total length of j or a array to be received */
5349: k = 0;
5350: for (i=0; i<nrecvs; i++) {
5351: rowlen = rvalues + (rstarts[i]-rstarts[0])*rbs;
5352: nrows = (rstarts[i+1]-rstarts[i])*rbs; /* num of rows to be received */
5353: for (j=0; j<nrows; j++) {
5354: b_othi[k+1] = b_othi[k] + rowlen[j];
5355: PetscIntSumError(rowlen[j],len,&len);
5356: k++;
5357: }
5358: rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
5359: }
5360: PetscFree(rvalues);
5362: /* allocate space for j and a arrays of B_oth */
5363: PetscMalloc1(b_othi[aBn]+1,&b_othj);
5364: PetscMalloc1(b_othi[aBn]+1,&b_otha);
5366: /* j-array */
5367: /*---------*/
5368: /* post receives of j-array */
5369: for (i=0; i<nrecvs; i++) {
5370: nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5371: MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5372: }
5374: /* pack the outgoing message j-array */
5375: if (nsends) k = sstarts[0];
5376: for (i=0; i<nsends; i++) {
5377: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5378: bufJ = bufj+sstartsj[i];
5379: for (j=0; j<nrows; j++) {
5380: row = srow[k++] + B->rmap->range[rank]; /* global row idx */
5381: for (ll=0; ll<sbs; ll++) {
5382: MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5383: for (l=0; l<ncols; l++) {
5384: *bufJ++ = cols[l];
5385: }
5386: MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5387: }
5388: }
5389: MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);
5390: }
5392: /* recvs and sends of j-array are completed */
5393: i = nrecvs;
5394: while (i--) {
5395: MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5396: }
5397: if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5398: } else if (scall == MAT_REUSE_MATRIX) {
5399: sstartsj = *startsj_s;
5400: rstartsj = *startsj_r;
5401: bufa = *bufa_ptr;
5402: b_oth = (Mat_SeqAIJ*)(*B_oth)->data;
5403: b_otha = b_oth->a;
5404: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE, "Matrix P does not possess an object container");
5406: /* a-array */
5407: /*---------*/
5408: /* post receives of a-array */
5409: for (i=0; i<nrecvs; i++) {
5410: nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5411: MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
5412: }
5414: /* pack the outgoing message a-array */
5415: if (nsends) k = sstarts[0];
5416: for (i=0; i<nsends; i++) {
5417: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5418: bufA = bufa+sstartsj[i];
5419: for (j=0; j<nrows; j++) {
5420: row = srow[k++] + B->rmap->range[rank]; /* global row idx */
5421: for (ll=0; ll<sbs; ll++) {
5422: MatGetRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5423: for (l=0; l<ncols; l++) {
5424: *bufA++ = vals[l];
5425: }
5426: MatRestoreRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5427: }
5428: }
5429: MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
5430: }
5431: /* recvs and sends of a-array are completed */
5432: i = nrecvs;
5433: while (i--) {
5434: MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5435: }
5436: if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5437: PetscFree2(rwaits,swaits);
5439: if (scall == MAT_INITIAL_MATRIX) {
5440: /* put together the new matrix */
5441: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);
5443: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5444: /* Since these are PETSc arrays, change flags to free them as necessary. */
5445: b_oth = (Mat_SeqAIJ*)(*B_oth)->data;
5446: b_oth->free_a = PETSC_TRUE;
5447: b_oth->free_ij = PETSC_TRUE;
5448: b_oth->nonew = 0;
5450: PetscFree(bufj);
5451: if (!startsj_s || !bufa_ptr) {
5452: PetscFree2(sstartsj,rstartsj);
5453: PetscFree(bufa_ptr);
5454: } else {
5455: *startsj_s = sstartsj;
5456: *startsj_r = rstartsj;
5457: *bufa_ptr = bufa;
5458: }
5459: }
5461: VecScatterRestoreRemote_Private(ctx,PETSC_TRUE,&nsends,&sstarts,&srow,&sprocs,&sbs);
5462: VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE,&nrecvs,&rstarts,NULL,&rprocs,&rbs);
5463: PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);
5464: return(0);
5465: }
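/*
   A minimal usage sketch (not part of the source): internal callers keep the
   startsj/bufa work arrays between calls so that a MAT_REUSE_MATRIX call only
   re-communicates the numerical values.  A and B are assumed to be compatible
   MATMPIAIJ matrices; the caller eventually frees the arrays and B_oth.

     PetscInt  *startsj_s = NULL,*startsj_r = NULL;
     MatScalar *bufa = NULL;
     Mat        B_oth;

     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_INITIAL_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);CHKERRQ(ierr);
     // ... values of B change, the nonzero pattern does not ...
     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_REUSE_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);CHKERRQ(ierr);
*/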
5467: /*@C
5468: MatGetCommunicationStructs - Provides access to the communication structures used in matrix-vector multiplication.
5470: Not Collective
5472: Input Parameters:
5473: . A - The matrix in mpiaij format
5475: Output Parameter:
5476: + lvec - The local vector holding off-process values from the argument to a matrix-vector product
5477: . colmap - A map from global column index to local index into lvec
5478: - multScatter - A scatter from the argument of a matrix-vector product to lvec
5480: Level: developer
5482: @*/
5483: #if defined(PETSC_USE_CTABLE)
5484: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscTable *colmap, VecScatter *multScatter)
5485: #else
5486: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscInt *colmap[], VecScatter *multScatter)
5487: #endif
5488: {
5489: Mat_MPIAIJ *a;
5496: a = (Mat_MPIAIJ*) A->data;
5497: if (lvec) *lvec = a->lvec;
5498: if (colmap) *colmap = a->colmap;
5499: if (multScatter) *multScatter = a->Mvctx;
5500: return(0);
5501: }
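/*
   A minimal usage sketch (not part of the source): peek at the scatter used by
   MatMult() for an assembled MATMPIAIJ matrix A; the local vector lvec holds the
   off-process entries of the vector being multiplied.

     Vec        lvec;
     VecScatter sct;
     PetscInt   nghost;

     ierr = MatGetCommunicationStructs(A,&lvec,NULL,&sct);CHKERRQ(ierr);
     ierr = VecGetSize(lvec,&nghost);CHKERRQ(ierr);  // number of off-process columns referenced locally
*/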
5503: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat,MatType,MatReuse,Mat*);
5504: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat,MatType,MatReuse,Mat*);
5505: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJSELL(Mat,MatType,MatReuse,Mat*);
5506: #if defined(PETSC_HAVE_MKL_SPARSE)
5507: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJMKL(Mat,MatType,MatReuse,Mat*);
5508: #endif
5509: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat,MatType,MatReuse,Mat*);
5510: #if defined(PETSC_HAVE_ELEMENTAL)
5511: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat,MatType,MatReuse,Mat*);
5512: #endif
5513: #if defined(PETSC_HAVE_HYPRE)
5514: PETSC_INTERN PetscErrorCode MatConvert_AIJ_HYPRE(Mat,MatType,MatReuse,Mat*);
5515: PETSC_INTERN PetscErrorCode MatMatMatMult_Transpose_AIJ_AIJ(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
5516: #endif
5517: PETSC_INTERN PetscErrorCode MatConvert_XAIJ_IS(Mat,MatType,MatReuse,Mat*);
5518: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat,MatType,MatReuse,Mat*);
5519: PETSC_INTERN PetscErrorCode MatPtAP_IS_XAIJ(Mat,Mat,MatReuse,PetscReal,Mat*);
5521: /*
5522: Computes (B'*A')' since computing A*B directly is untenable
5524:                  n                         p                            p
5525:          (               )         (               )           (               )
5526:        m (       A       )  *    n (       B       )    =    m (       C       )
5527:          (               )         (               )           (               )
5529: */
5530: PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
5531: {
5533: Mat At,Bt,Ct;
5536: MatTranspose(A,MAT_INITIAL_MATRIX,&At);
5537: MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);
5538: MatMatMult(Bt,At,MAT_INITIAL_MATRIX,1.0,&Ct);
5539: MatDestroy(&At);
5540: MatDestroy(&Bt);
5541: MatTranspose(Ct,MAT_REUSE_MATRIX,&C);
5542: MatDestroy(&Ct);
5543: return(0);
5544: }
5546: PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
5547: {
5549: PetscInt m=A->rmap->n,n=B->cmap->n;
5550: Mat Cmat;
5553: if (A->cmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"A->cmap->n %d != B->rmap->n %d\n",A->cmap->n,B->rmap->n);
5554: MatCreate(PetscObjectComm((PetscObject)A),&Cmat);
5555: MatSetSizes(Cmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
5556: MatSetBlockSizesFromMats(Cmat,A,B);
5557: MatSetType(Cmat,MATMPIDENSE);
5558: MatMPIDenseSetPreallocation(Cmat,NULL);
5559: MatAssemblyBegin(Cmat,MAT_FINAL_ASSEMBLY);
5560: MatAssemblyEnd(Cmat,MAT_FINAL_ASSEMBLY);
5562: Cmat->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;
5564: *C = Cmat;
5565: return(0);
5566: }
5568: /* ----------------------------------------------------------------*/
5569: PETSC_INTERN PetscErrorCode MatMatMult_MPIDense_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
5570: {
5574: if (scall == MAT_INITIAL_MATRIX) {
5575: PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
5576: MatMatMultSymbolic_MPIDense_MPIAIJ(A,B,fill,C);
5577: PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
5578: }
5579: PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
5580: MatMatMultNumeric_MPIDense_MPIAIJ(A,B,*C);
5581: PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
5582: return(0);
5583: }
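/*
   A minimal usage sketch (not part of the source): with A of type MATMPIDENSE and
   B of type MATMPIAIJ, MatMatMult() dispatches to the routine above; A and B are
   assumed to be assembled and dimensionally compatible.

     Mat C;

     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
     // ... values of A and/or B change, the sizes do not ...
     ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
     ierr = MatDestroy(&C);CHKERRQ(ierr);
*/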
5585: /*MC
5586: MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.
5588: Options Database Keys:
5589: . -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()
5591: Level: beginner
5593: .seealso: MatCreateAIJ()
5594: M*/
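/*
   A minimal usage sketch (not part of the source): the usual way to obtain a
   MATMPIAIJ matrix is the generic creation sequence; the global size and the
   preallocation counts below are illustrative assumptions only.

     Mat A;

     ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
     ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);CHKERRQ(ierr);
     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
     ierr = MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);CHKERRQ(ierr);
     // ... MatSetValues()/MatAssemblyBegin()/MatAssemblyEnd() ...
*/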
5596: PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
5597: {
5598: Mat_MPIAIJ *b;
5600: PetscMPIInt size;
5603: MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);
5605: PetscNewLog(B,&b);
5606: B->data = (void*)b;
5607: PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
5608: B->assembled = PETSC_FALSE;
5609: B->insertmode = NOT_SET_VALUES;
5610: b->size = size;
5612: MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);
5614: /* build cache for off array entries formed */
5615: MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);
5617: b->donotstash = PETSC_FALSE;
5618: b->colmap = 0;
5619: b->garray = 0;
5620: b->roworiented = PETSC_TRUE;
5622: /* stuff used for matrix vector multiply */
5623: b->lvec = NULL;
5624: b->Mvctx = NULL;
5626: /* stuff for MatGetRow() */
5627: b->rowindices = 0;
5628: b->rowvalues = 0;
5629: b->getrowactive = PETSC_FALSE;
5631: /* flexible pointer used in CUSP/CUSPARSE classes */
5632: b->spptr = NULL;
5634: PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetUseScalableIncreaseOverlap_C",MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ);
5635: PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPIAIJ);
5636: PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPIAIJ);
5637: PetscObjectComposeFunction((PetscObject)B,"MatIsTranspose_C",MatIsTranspose_MPIAIJ);
5638: PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJ);
5639: PetscObjectComposeFunction((PetscObject)B,"MatResetPreallocation_C",MatResetPreallocation_MPIAIJ);
5640: PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",MatMPIAIJSetPreallocationCSR_MPIAIJ);
5641: PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPIAIJ);
5642: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijperm_C",MatConvert_MPIAIJ_MPIAIJPERM);
5643: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijsell_C",MatConvert_MPIAIJ_MPIAIJSELL);
5644: #if defined(PETSC_HAVE_MKL_SPARSE)
5645: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijmkl_C",MatConvert_MPIAIJ_MPIAIJMKL);
5646: #endif
5647: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcrl_C",MatConvert_MPIAIJ_MPIAIJCRL);
5648: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisbaij_C",MatConvert_MPIAIJ_MPISBAIJ);
5649: #if defined(PETSC_HAVE_ELEMENTAL)
5650: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_elemental_C",MatConvert_MPIAIJ_Elemental);
5651: #endif
5652: #if defined(PETSC_HAVE_HYPRE)
5653: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_hypre_C",MatConvert_AIJ_HYPRE);
5654: #endif
5655: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_is_C",MatConvert_XAIJ_IS);
5656: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisell_C",MatConvert_MPIAIJ_MPISELL);
5657: PetscObjectComposeFunction((PetscObject)B,"MatMatMult_mpidense_mpiaij_C",MatMatMult_MPIDense_MPIAIJ);
5658: PetscObjectComposeFunction((PetscObject)B,"MatMatMultSymbolic_mpidense_mpiaij_C",MatMatMultSymbolic_MPIDense_MPIAIJ);
5659: PetscObjectComposeFunction((PetscObject)B,"MatMatMultNumeric_mpidense_mpiaij_C",MatMatMultNumeric_MPIDense_MPIAIJ);
5660: #if defined(PETSC_HAVE_HYPRE)
5661: PetscObjectComposeFunction((PetscObject)B,"MatMatMatMult_transpose_mpiaij_mpiaij_C",MatMatMatMult_Transpose_AIJ_AIJ);
5662: #endif
5663: PetscObjectComposeFunction((PetscObject)B,"MatPtAP_is_mpiaij_C",MatPtAP_IS_XAIJ);
5664: PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);
5665: return(0);
5666: }
5668: /*@C
5669: MatCreateMPIAIJWithSplitArrays - creates an MPI AIJ matrix using arrays that contain the "diagonal"
5670: and "off-diagonal" part of the matrix in CSR format.
5672: Collective on MPI_Comm
5674: Input Parameters:
5675: + comm - MPI communicator
5676: . m - number of local rows (Cannot be PETSC_DECIDE)
5677: . n - This value should be the same as the local size used in creating the
5678: x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
5679: calculated if N is given). For square matrices n is almost always m.
5680: . M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
5681: . N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
5682: . i - row indices for "diagonal" portion of matrix; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
5683: . j - column indices
5684: . a - matrix values
5685: . oi - row indices for "off-diagonal" portion of matrix; that is oi[0] = 0, oi[row] = oi[row-1] + number of elements in that row of the matrix
5686: . oj - column indices
5687: - oa - matrix values
5689: Output Parameter:
5690: . mat - the matrix
5692: Level: advanced
5694: Notes:
5695: The i, j, and a arrays ARE NOT copied by this routine into the internal format used by PETSc. The user
5696: must free the arrays once the matrix has been destroyed and not before.
5698: The i and j indices are 0 based
5700: See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix
5702: This sets local rows and cannot be used to set off-processor values.
5704: Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a
5705: legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does
5706: not easily support in-place reassembly. It is recommended to use MatSetValues() (or a variant thereof) because
5707: the resulting assembly is easier to implement, will work with any matrix format, and the user does not have to
5708: keep track of the underlying array. Use MatSetOption(A,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) to disable all
5709: communication if it is known that only local entries will be set.
5711: .keywords: matrix, aij, compressed row, sparse, parallel
5713: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
5714: MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithArrays()
5715: @*/
5716: PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
5717: {
5719: Mat_MPIAIJ *maij;
5722: if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
5723: if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
5724: if (oi[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"oi (row indices) must start with 0");
5725: MatCreate(comm,mat);
5726: MatSetSizes(*mat,m,n,M,N);
5727: MatSetType(*mat,MATMPIAIJ);
5728: maij = (Mat_MPIAIJ*) (*mat)->data;
5730: (*mat)->preallocated = PETSC_TRUE;
5732: PetscLayoutSetUp((*mat)->rmap);
5733: PetscLayoutSetUp((*mat)->cmap);
5735: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);
5736: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);
5738: MatAssemblyBegin(maij->A,MAT_FINAL_ASSEMBLY);
5739: MatAssemblyEnd(maij->A,MAT_FINAL_ASSEMBLY);
5740: MatAssemblyBegin(maij->B,MAT_FINAL_ASSEMBLY);
5741: MatAssemblyEnd(maij->B,MAT_FINAL_ASSEMBLY);
5743: MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
5744: MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
5745: MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
5746: MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
5747: MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
5748: return(0);
5749: }
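/*
   A minimal usage sketch (not part of the source): a one-row-per-process matrix
   whose diagonal block holds a single entry and whose off-diagonal block is
   empty; all arrays stay owned by the caller, and the values are illustrative.

     PetscInt    i[2]  = {0,1}, j[1]  = {0};
     PetscScalar a[1]  = {2.0};
     PetscInt    oi[2] = {0,0}, oj[1] = {0};
     PetscScalar oa[1] = {0.0};
     Mat         A;

     ierr = MatCreateMPIAIJWithSplitArrays(PETSC_COMM_WORLD,1,1,PETSC_DETERMINE,PETSC_DETERMINE,i,j,a,oi,oj,oa,&A);CHKERRQ(ierr);
     // ... use A; after MatDestroy() the arrays remain the caller's to manage ...
     ierr = MatDestroy(&A);CHKERRQ(ierr);
*/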
5751: /*
5752: Special version for direct calls from Fortran
5753: */
5754: #include <petsc/private/fortranimpl.h>
5756: /* Change these macros so they can be used in a void function */
5757: #undef CHKERRQ
5758: #define CHKERRQ(ierr) CHKERRABORT(PETSC_COMM_WORLD,ierr)
5759: #undef SETERRQ2
5760: #define SETERRQ2(comm,ierr,b,c,d) CHKERRABORT(comm,ierr)
5761: #undef SETERRQ3
5762: #define SETERRQ3(comm,ierr,b,c,d,e) CHKERRABORT(comm,ierr)
5763: #undef SETERRQ
5764: #define SETERRQ(c,ierr,b) CHKERRABORT(c,ierr)
5766: #if defined(PETSC_HAVE_FORTRAN_CAPS)
5767: #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
5768: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
5769: #define matsetvaluesmpiaij_ matsetvaluesmpiaij
5770: #else
5771: #endif
5772: PETSC_EXTERN void PETSC_STDCALL matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
5773: {
5774: Mat mat = *mmat;
5775: PetscInt m = *mm, n = *mn;
5776: InsertMode addv = *maddv;
5777: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
5778: PetscScalar value;
5781: MatCheckPreallocated(mat,1);
5782: if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
5784: #if defined(PETSC_USE_DEBUG)
5785: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
5786: #endif
5787: {
5788: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
5789: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
5790: PetscBool roworiented = aij->roworiented;
5792: /* Some Variables required in the macro */
5793: Mat A = aij->A;
5794: Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
5795: PetscInt *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
5796: MatScalar *aa = a->a;
5797: PetscBool ignorezeroentries = (((a->ignorezeroentries)&&(addv==ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
5798: Mat B = aij->B;
5799: Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
5800: PetscInt *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
5801: MatScalar *ba = b->a;
5803: PetscInt *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
5804: PetscInt nonew = a->nonew;
5805: MatScalar *ap1,*ap2;
5808: for (i=0; i<m; i++) {
5809: if (im[i] < 0) continue;
5810: #if defined(PETSC_USE_DEBUG)
5811: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
5812: #endif
5813: if (im[i] >= rstart && im[i] < rend) {
5814: row = im[i] - rstart;
5815: lastcol1 = -1;
5816: rp1 = aj + ai[row];
5817: ap1 = aa + ai[row];
5818: rmax1 = aimax[row];
5819: nrow1 = ailen[row];
5820: low1 = 0;
5821: high1 = nrow1;
5822: lastcol2 = -1;
5823: rp2 = bj + bi[row];
5824: ap2 = ba + bi[row];
5825: rmax2 = bimax[row];
5826: nrow2 = bilen[row];
5827: low2 = 0;
5828: high2 = nrow2;
5830: for (j=0; j<n; j++) {
5831: if (roworiented) value = v[i*n+j];
5832: else value = v[i+j*m];
5833: if (in[j] >= cstart && in[j] < cend) {
5834: col = in[j] - cstart;
5835: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && row != col) continue;
5836: MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
5837: } else if (in[j] < 0) continue;
5838: #if defined(PETSC_USE_DEBUG)
5839: /* extra brace on SETERRQ2() is required for --with-errorchecking=0 - due to the next 'else' clause */
5840: else if (in[j] >= mat->cmap->N) {SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);}
5841: #endif
5842: else {
5843: if (mat->was_assembled) {
5844: if (!aij->colmap) {
5845: MatCreateColmap_MPIAIJ_Private(mat);
5846: }
5847: #if defined(PETSC_USE_CTABLE)
5848: PetscTableFind(aij->colmap,in[j]+1,&col);
5849: col--;
5850: #else
5851: col = aij->colmap[in[j]] - 1;
5852: #endif
5853: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && row != col) continue;
5854: if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
5855: MatDisAssemble_MPIAIJ(mat);
5856: col = in[j];
5857: /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
5858: B = aij->B;
5859: b = (Mat_SeqAIJ*)B->data;
5860: bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
5861: rp2 = bj + bi[row];
5862: ap2 = ba + bi[row];
5863: rmax2 = bimax[row];
5864: nrow2 = bilen[row];
5865: low2 = 0;
5866: high2 = nrow2;
5867: bm = aij->B->rmap->n;
5868: ba = b->a;
5869: }
5870: } else col = in[j];
5871: MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
5872: }
5873: }
5874: } else if (!aij->donotstash) {
5875: if (roworiented) {
5876: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
5877: } else {
5878: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
5879: }
5880: }
5881: }
5882: }
5883: PetscFunctionReturnVoid();
5884: }