Actual source code: mpimatmatmult.c
petsc-3.8.3 2017-12-09
2: /*
3: Defines matrix-matrix product routines for pairs of MPIAIJ matrices
4: C = A * B
5: */
6: #include <../src/mat/impls/aij/seq/aij.h>
7: #include <../src/mat/utils/freespace.h>
8: #include <../src/mat/impls/aij/mpi/mpiaij.h>
9: #include <petscbt.h>
10: #include <../src/mat/impls/dense/mpi/mpidense.h>
11: #include <petsc/private/vecimpl.h>
13: #if defined(PETSC_HAVE_HYPRE)
14: PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat*);
15: #endif
17: PETSC_INTERN PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill, Mat *C)
18: {
20: #if defined(PETSC_HAVE_HYPRE)
21: const char *algTypes[3] = {"scalable","nonscalable","hypre"};
22: PetscInt nalg = 3;
23: #else
24: const char *algTypes[2] = {"scalable","nonscalable"};
25: PetscInt nalg = 2;
26: #endif
27: PetscInt alg = 1; /* set nonscalable algorithm as default */
28: MPI_Comm comm;
29: PetscBool flg;
32: if (scall == MAT_INITIAL_MATRIX) {
33: PetscObjectGetComm((PetscObject)A,&comm);
34: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
36: PetscObjectOptionsBegin((PetscObject)A);
37: PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
38: PetscOptionsEList("-matmatmult_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[1],&alg,&flg);
39: PetscOptionsEnd();
41: if (!flg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
42: MatInfo Ainfo,Binfo;
43: PetscInt nz_local;
44: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
46: MatGetInfo(A,MAT_LOCAL,&Ainfo);
47: MatGetInfo(B,MAT_LOCAL,&Binfo);
48: nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
50: if (B->cmap->N > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
51: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
53: if (alg_scalable) {
54: alg = 0; /* the scalable algorithm can be roughly 50% slower than the nonscalable algorithm */
55: PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,fill*nz_local);
56: }
57: }
59: PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
60: switch (alg) {
61: case 1:
62: MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);
63: break;
64: #if defined(PETSC_HAVE_HYPRE)
65: case 2:
66: MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A,B,fill,C);
67: break;
68: #endif
69: default:
70: MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);
71: break;
72: }
73: PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
74: }
75: PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
76: (*(*C)->ops->matmultnumeric)(A,B,*C);
77: PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
78: return(0);
79: }
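/*
   Usage sketch (illustrative, kept as a comment): this dispatcher is reached through the
   public MatMatMult() interface with two assembled MATMPIAIJ matrices A and B of
   compatible layout; error checking is elided.

     Mat C;
     MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
     ... update the numerical values of A and/or B, keeping their nonzero patterns ...
     MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);
     MatDestroy(&C);

   The symbolic algorithm can be selected at run time with -matmatmult_via
   scalable|nonscalable (plus hypre when PETSc is built with hypre support), as parsed above.
*/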
81: PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
82: {
84: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
85: Mat_PtAPMPI *ptap = a->ptap;
88: PetscFree2(ptap->startsj_s,ptap->startsj_r);
89: PetscFree(ptap->bufa);
90: MatDestroy(&ptap->P_loc);
91: MatDestroy(&ptap->P_oth);
92: MatDestroy(&ptap->Pt);
93: PetscFree(ptap->api);
94: PetscFree(ptap->apj);
95: PetscFree(ptap->apa);
96: ptap->destroy(A);
97: PetscFree(ptap);
98: return(0);
99: }
101: PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A, MatDuplicateOption op, Mat *M)
102: {
104: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
105: Mat_PtAPMPI *ptap = a->ptap;
108: (*ptap->duplicate)(A,op,M);
110: (*M)->ops->destroy = ptap->destroy; /* = MatDestroy_MPIAIJ, *M doesn't duplicate A's special structure! */
111: (*M)->ops->duplicate = ptap->duplicate; /* = MatDuplicate_MPIAIJ */
112: return(0);
113: }
115: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,Mat C)
116: {
118: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
119: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
120: Mat_SeqAIJ *cd =(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
121: PetscScalar *cda=cd->a,*coa=co->a;
122: Mat_SeqAIJ *p_loc,*p_oth;
123: PetscScalar *apa,*ca;
124: PetscInt cm =C->rmap->n;
125: Mat_PtAPMPI *ptap=c->ptap;
126: PetscInt *api,*apj,*apJ,i,k;
127: PetscInt cstart=C->cmap->rstart;
128: PetscInt cdnz,conz,k0,k1;
129: MPI_Comm comm;
130: PetscMPIInt size;
133: PetscObjectGetComm((PetscObject)A,&comm);
134: MPI_Comm_size(comm,&size);
136: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
137: /*-----------------------------------------------------*/
138: /* update numerical values of P_oth and P_loc */
139: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
140: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
142: /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
143: /*----------------------------------------------------------*/
144: /* get data from symbolic products */
145: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
146: p_oth = NULL;
147: if (size >1) {
148: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
149: }
151: /* get apa for storing dense row A[i,:]*P */
152: apa = ptap->apa;
154: api = ptap->api;
155: apj = ptap->apj;
156: for (i=0; i<cm; i++) {
157: /* compute apa = A[i,:]*P */
158: AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
160: /* set values in C */
161: apJ = apj + api[i];
162: cdnz = cd->i[i+1] - cd->i[i];
163: conz = co->i[i+1] - co->i[i];
165: /* 1st off-diagonal part of C */
166: ca = coa + co->i[i];
167: k = 0;
168: for (k0=0; k0<conz; k0++) {
169: if (apJ[k] >= cstart) break;
170: ca[k0] = apa[apJ[k]];
171: apa[apJ[k++]] = 0.0;
172: }
174: /* diagonal part of C */
175: ca = cda + cd->i[i];
176: for (k1=0; k1<cdnz; k1++) {
177: ca[k1] = apa[apJ[k]];
178: apa[apJ[k++]] = 0.0;
179: }
181: /* 2nd off-diagonal part of C */
182: ca = coa + co->i[i];
183: for (; k0<conz; k0++) {
184: ca[k0] = apa[apJ[k]];
185: apa[apJ[k++]] = 0.0;
186: }
187: }
188: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
189: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
190: return(0);
191: }
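/*
   Sketch of the dense-accumulator scatter performed in the loop above (illustrative
   pseudocode with hypothetical names; cend stands for C->cmap->rend).  apa[] is a dense
   row of AP addressed by global column, and apJ[] holds that row's sorted global column
   indices; columns in [cstart,cend) belong to the diagonal block cd, all others to the
   off-diagonal block co, whose "before cstart" and "after cend" runs are stored
   contiguously:

     for (k=0; k<apnz; k++) {
       col = apJ[k];
       if (cstart <= col && col < cend) cd_vals[kd++] = apa[col];
       else                             co_vals[ko++] = apa[col];
       apa[col] = 0.0;
     }

   Zeroing apa[col] as each value is copied keeps the dense work array clean for the next
   row without an O(pN) memset.
*/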
193: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,PetscReal fill,Mat *C)
194: {
195: PetscErrorCode ierr;
196: MPI_Comm comm;
197: PetscMPIInt size;
198: Mat Cmpi;
199: Mat_PtAPMPI *ptap;
200: PetscFreeSpaceList free_space=NULL,current_space=NULL;
201: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c;
202: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
203: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
204: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
205: PetscInt *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi;
206: PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
207: PetscBT lnkbt;
208: PetscScalar *apa;
209: PetscReal afill;
212: PetscObjectGetComm((PetscObject)A,&comm);
213: MPI_Comm_size(comm,&size);
215: /* create struct Mat_PtAPMPI and attach it to C later */
216: PetscNew(&ptap);
218: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
219: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
221: /* get P_loc by taking all local rows of P */
222: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
224: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
225: pi_loc = p_loc->i; pj_loc = p_loc->j;
226: if (size > 1) {
227: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
228: pi_oth = p_oth->i; pj_oth = p_oth->j;
229: } else {
230: p_oth = NULL;
231: pi_oth = NULL; pj_oth = NULL;
232: }
234: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
235: /*-------------------------------------------------------------------*/
236: PetscMalloc1(am+2,&api);
237: ptap->api = api;
238: api[0] = 0;
240: /* create and initialize a linked list */
241: PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);
243: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
244: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);
245: current_space = free_space;
247: MatPreallocateInitialize(comm,am,pn,dnz,onz);
248: for (i=0; i<am; i++) {
249: /* diagonal portion of A */
250: nzi = adi[i+1] - adi[i];
251: for (j=0; j<nzi; j++) {
252: row = *adj++;
253: pnz = pi_loc[row+1] - pi_loc[row];
254: Jptr = pj_loc + pi_loc[row];
255: /* add non-zero cols of P into the sorted linked list lnk */
256: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
257: }
258: /* off-diagonal portion of A */
259: nzi = aoi[i+1] - aoi[i];
260: for (j=0; j<nzi; j++) {
261: row = *aoj++;
262: pnz = pi_oth[row+1] - pi_oth[row];
263: Jptr = pj_oth + pi_oth[row];
264: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
265: }
267: apnz = lnk[0];
268: api[i+1] = api[i] + apnz;
270: /* if free space is not available, double the total space in the list */
271: if (current_space->local_remaining<apnz) {
272: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
273: nspacedouble++;
274: }
276: /* Copy data into free space, then initialize lnk */
277: PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);
278: MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);
280: current_space->array += apnz;
281: current_space->local_used += apnz;
282: current_space->local_remaining -= apnz;
283: }
285: /* Allocate space for apj, initialize apj, and */
286: /* destroy list of free space and other temporary array(s) */
287: PetscMalloc1(api[am]+1,&ptap->apj);
288: apj = ptap->apj;
289: PetscFreeSpaceContiguous(&free_space,ptap->apj);
290: PetscLLDestroy(lnk,lnkbt);
292: /* malloc apa to store dense row A[i,:]*P */
293: PetscCalloc1(pN,&apa);
295: ptap->apa = apa;
297: /* create and assemble symbolic parallel matrix Cmpi */
298: /*----------------------------------------------------*/
299: MatCreate(comm,&Cmpi);
300: MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
301: MatSetBlockSizesFromMats(Cmpi,A,P);
303: MatSetType(Cmpi,MATMPIAIJ);
304: MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);
305: MatPreallocateFinalize(dnz,onz);
306: for (i=0; i<am; i++) {
307: row = i + rstart;
308: apnz = api[i+1] - api[i];
309: MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);
310: apj += apnz;
311: }
312: MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);
313: MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);
315: ptap->destroy = Cmpi->ops->destroy;
316: ptap->duplicate = Cmpi->ops->duplicate;
317: Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
318: Cmpi->ops->destroy = MatDestroy_MPIAIJ_MatMatMult;
319: Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult;
321: /* attach the supporting struct to Cmpi for reuse */
322: c = (Mat_MPIAIJ*)Cmpi->data;
323: c->ptap = ptap;
325: *C = Cmpi;
327: /* set MatInfo */
328: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
329: if (afill < 1.0) afill = 1.0;
330: Cmpi->info.mallocs = nspacedouble;
331: Cmpi->info.fill_ratio_given = fill;
332: Cmpi->info.fill_ratio_needed = afill;
334: #if defined(PETSC_USE_INFO)
335: if (api[am]) {
336: PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
337: PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
338: } else {
339: PetscInfo(Cmpi,"Empty matrix product\n");
340: }
341: #endif
342: return(0);
343: }
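/*
   The fill statistics recorded just above can be inspected by the caller; a minimal
   sketch (assumes C was produced by MatMatMult() as in the dispatcher above):

     MatInfo info;
     MatGetInfo(C,MAT_LOCAL,&info);
     PetscPrintf(PETSC_COMM_SELF,"fill given %g, fill needed %g, mallocs %g\n",
                 info.fill_ratio_given,info.fill_ratio_needed,info.mallocs);

   Passing a fill value close to fill_ratio_needed on later symbolic products avoids the
   nspacedouble reallocations counted here.
*/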
345: PETSC_INTERN PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
346: {
350: if (scall == MAT_INITIAL_MATRIX) {
351: PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
352: MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);
353: PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
354: }
355: PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
356: MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);
357: PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
358: return(0);
359: }
361: typedef struct {
362: Mat workB;
363: PetscScalar *rvalues,*svalues;
364: MPI_Request *rwaits,*swaits;
365: } MPIAIJ_MPIDense;
367: PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx)
368: {
369: MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx;
370: PetscErrorCode ierr;
373: MatDestroy(&contents->workB);
374: PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);
375: PetscFree(contents);
376: return(0);
377: }
379: /*
380: This is a "dummy function" that handles the case where matrix C was created as a dense matrix
381: directly by the user and passed to MatMatMult() with the MAT_REUSE_MATRIX option
383:   It is the same as MatMatMultSymbolic_MPIAIJ_MPIDense() except that it does not create C
384: */
385: PetscErrorCode MatMatMultNumeric_MPIDense(Mat A,Mat B,Mat C)
386: {
387: PetscErrorCode ierr;
388: PetscBool flg;
389: Mat_MPIAIJ *aij = (Mat_MPIAIJ*) A->data;
390: PetscInt nz = aij->B->cmap->n;
391: PetscContainer container;
392: MPIAIJ_MPIDense *contents;
393: VecScatter ctx = aij->Mvctx;
394: VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
395: VecScatter_MPI_General *to = (VecScatter_MPI_General*) ctx->todata;
398: PetscObjectTypeCompare((PetscObject)B,MATMPIDENSE,&flg);
399: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Second matrix must be mpidense");
401: /* Handle the case where the user provided the final C matrix rather than calling MatMatMult() with MAT_INITIAL_MATRIX */
402: PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&flg);
403: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"First matrix must be MPIAIJ");
405: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
407: PetscNew(&contents);
408: /* Create work matrix used to store off processor rows of B needed for local product */
409: MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,NULL,&contents->workB);
410: /* Create work arrays needed */
411: PetscMalloc4(B->cmap->N*from->starts[from->n],&contents->rvalues,
412: B->cmap->N*to->starts[to->n],&contents->svalues,
413: from->n,&contents->rwaits,
414: to->n,&contents->swaits);
416: PetscContainerCreate(PetscObjectComm((PetscObject)A),&container);
417: PetscContainerSetPointer(container,contents);
418: PetscContainerSetUserDestroy(container,MatMPIAIJ_MPIDenseDestroy);
419: PetscObjectCompose((PetscObject)C,"workB",(PetscObject)container);
420: PetscContainerDestroy(&container);
422: (*C->ops->matmultnumeric)(A,B,C);
423: return(0);
424: }
426: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
427: {
428: PetscErrorCode ierr;
429: Mat_MPIAIJ *aij = (Mat_MPIAIJ*) A->data;
430: PetscInt nz = aij->B->cmap->n;
431: PetscContainer container;
432: MPIAIJ_MPIDense *contents;
433: VecScatter ctx = aij->Mvctx;
434: VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
435: VecScatter_MPI_General *to = (VecScatter_MPI_General*) ctx->todata;
436: PetscInt m = A->rmap->n,n=B->cmap->n;
439: MatCreate(PetscObjectComm((PetscObject)B),C);
440: MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);
441: MatSetBlockSizesFromMats(*C,A,B);
442: MatSetType(*C,MATMPIDENSE);
443: MatMPIDenseSetPreallocation(*C,NULL);
444: MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);
445: MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);
447: (*C)->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
449: PetscNew(&contents);
450: /* Create work matrix used to store off processor rows of B needed for local product */
451: MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,NULL,&contents->workB);
452: /* Create work arrays needed */
453: PetscMalloc4(B->cmap->N*from->starts[from->n],&contents->rvalues,
454: B->cmap->N*to->starts[to->n],&contents->svalues,
455: from->n,&contents->rwaits,
456: to->n,&contents->swaits);
458: PetscContainerCreate(PetscObjectComm((PetscObject)A),&container);
459: PetscContainerSetPointer(container,contents);
460: PetscContainerSetUserDestroy(container,MatMPIAIJ_MPIDenseDestroy);
461: PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)container);
462: PetscContainerDestroy(&container);
463: return(0);
464: }
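/*
   Sketch of the PetscContainer idiom used above to attach the scratch data to C
   (illustrative; MyCtx and MyCtxDestroy are hypothetical names):

     PetscContainer container;
     MyCtx          *ctx;
     PetscNew(&ctx);
     PetscContainerCreate(PetscObjectComm((PetscObject)C),&container);
     PetscContainerSetPointer(container,ctx);
     PetscContainerSetUserDestroy(container,MyCtxDestroy);
     PetscObjectCompose((PetscObject)C,"workB",(PetscObject)container);
     PetscContainerDestroy(&container);

   After PetscContainerDestroy() the composed matrix holds the remaining reference, so
   MatMPIDenseScatter() can later recover the context with PetscObjectQuery() and
   PetscContainerGetPointer(), and the work buffers survive between numeric products.
*/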
466: /*
467: Performs an efficient scatter on the rows of B needed by this process; this is
468: a modification of the VecScatterBegin_() routines.
469: */
470: PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
471: {
472: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
473: PetscErrorCode ierr;
474: PetscScalar *b,*w,*svalues,*rvalues;
475: VecScatter ctx = aij->Mvctx;
476: VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
477: VecScatter_MPI_General *to = (VecScatter_MPI_General*) ctx->todata;
478: PetscInt i,j,k;
479: PetscInt *sindices,*sstarts,*rindices,*rstarts;
480: PetscMPIInt *sprocs,*rprocs,nrecvs;
481: MPI_Request *swaits,*rwaits;
482: MPI_Comm comm;
483: PetscMPIInt tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N, nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
484: MPI_Status status;
485: MPIAIJ_MPIDense *contents;
486: PetscContainer container;
487: Mat workB;
490: PetscObjectGetComm((PetscObject)A,&comm);
491: PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);
492: if (!container) SETERRQ(comm,PETSC_ERR_PLIB,"Container does not exist");
493: PetscContainerGetPointer(container,(void**)&contents);
495: workB = *outworkB = contents->workB;
496: if (nrows != workB->rmap->n) SETERRQ2(comm,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
497: sindices = to->indices;
498: sstarts = to->starts;
499: sprocs = to->procs;
500: swaits = contents->swaits;
501: svalues = contents->svalues;
503: rindices = from->indices;
504: rstarts = from->starts;
505: rprocs = from->procs;
506: rwaits = contents->rwaits;
507: rvalues = contents->rvalues;
509: MatDenseGetArray(B,&b);
510: MatDenseGetArray(workB,&w);
512: for (i=0; i<from->n; i++) {
513: MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
514: }
516: for (i=0; i<to->n; i++) {
517: /* pack a message at a time */
518: for (j=0; j<sstarts[i+1]-sstarts[i]; j++) {
519: for (k=0; k<ncols; k++) {
520: svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
521: }
522: }
523: MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
524: }
526: nrecvs = from->n;
527: while (nrecvs) {
528: MPI_Waitany(from->n,rwaits,&imdex,&status);
529: nrecvs--;
530: /* unpack a message at a time */
531: for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++) {
532: for (k=0; k<ncols; k++) {
533: w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
534: }
535: }
536: }
537: if (to->n) {MPI_Waitall(to->n,swaits,to->sstatus);}
539: MatDenseRestoreArray(B,&b);
540: MatDenseRestoreArray(workB,&w);
541: MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);
542: MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);
543: return(0);
544: }
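/*
   Sketch of the pack/unpack indexing used above (illustrative).  An MPIDense matrix
   stores its local block column by column, so entry (r,k) of B sits at b[r + nrowsB*k];
   packing the rows destined for one process therefore gathers a strided slice per row:

     for (j=0; j<nrows_to_send; j++)
       for (k=0; k<ncols; k++)
         svalues[ncols*j + k] = b[rows_to_send[j] + nrowsB*k];

   The receiver mirrors this, writing into workB with its own leading dimension nrows
   (the number of columns of aij->B).
*/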
545: extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);
547: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
548: {
550: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
551: Mat_MPIDense *bdense = (Mat_MPIDense*)B->data;
552: Mat_MPIDense *cdense = (Mat_MPIDense*)C->data;
553: Mat workB;
556: /* diagonal block of A times all local rows of B*/
557: MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);
559: /* get off processor parts of B needed to complete the product */
560: MatMPIDenseScatter(A,B,C,&workB);
562: /* off-diagonal block of A times nonlocal rows of B */
563: MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);
564: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
565: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
566: return(0);
567: }
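/*
   Written out, the split computed above is (per rank, illustrative notation)

     C_loc = A_d * B_loc + A_o * B_oth

   where A_d and A_o are the diagonal and off-diagonal blocks of the local rows of A,
   B_loc is the locally owned block row of B, and B_oth (workB) holds the remote rows of
   B gathered by MatMPIDenseScatter().
*/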
569: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
570: {
572: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
573: Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
574: Mat_SeqAIJ *cd = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
575: PetscInt *adi = ad->i,*adj,*aoi=ao->i,*aoj;
576: PetscScalar *ada,*aoa,*cda=cd->a,*coa=co->a;
577: Mat_SeqAIJ *p_loc,*p_oth;
578: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
579: PetscScalar *pa_loc,*pa_oth,*pa,valtmp,*ca;
580: PetscInt cm = C->rmap->n,anz,pnz;
581: Mat_PtAPMPI *ptap = c->ptap;
582: PetscScalar *apa_sparse = ptap->apa;
583: PetscInt *api,*apj,*apJ,i,j,k,row;
584: PetscInt cstart = C->cmap->rstart;
585: PetscInt cdnz,conz,k0,k1,nextp;
586: MPI_Comm comm;
587: PetscMPIInt size;
590: PetscObjectGetComm((PetscObject)A,&comm);
591: MPI_Comm_size(comm,&size);
593: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
594: /*-----------------------------------------------------*/
595: /* update numerical values of P_oth and P_loc */
596: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
597: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
599: /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
600: /*----------------------------------------------------------*/
601: /* get data from symbolic products */
602: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
603: pi_loc = p_loc->i; pj_loc = p_loc->j; pa_loc = p_loc->a;
604: if (size >1) {
605: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
606: pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a;
607: } else {
608: p_oth = NULL; pi_oth = NULL; pj_oth = NULL; pa_oth = NULL;
609: }
611: api = ptap->api;
612: apj = ptap->apj;
613: for (i=0; i<cm; i++) {
614: apJ = apj + api[i];
616: /* diagonal portion of A */
617: anz = adi[i+1] - adi[i];
618: adj = ad->j + adi[i];
619: ada = ad->a + adi[i];
620: for (j=0; j<anz; j++) {
621: row = adj[j];
622: pnz = pi_loc[row+1] - pi_loc[row];
623: pj = pj_loc + pi_loc[row];
624: pa = pa_loc + pi_loc[row];
625: /* perform sparse axpy */
626: valtmp = ada[j];
627: nextp = 0;
628: for (k=0; nextp<pnz; k++) {
629: if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
630: apa_sparse[k] += valtmp*pa[nextp++];
631: }
632: }
633: PetscLogFlops(2.0*pnz);
634: }
636: /* off-diagonal portion of A */
637: anz = aoi[i+1] - aoi[i];
638: aoj = ao->j + aoi[i];
639: aoa = ao->a + aoi[i];
640: for (j=0; j<anz; j++) {
641: row = aoj[j];
642: pnz = pi_oth[row+1] - pi_oth[row];
643: pj = pj_oth + pi_oth[row];
644: pa = pa_oth + pi_oth[row];
645: /* perform sparse axpy */
646: valtmp = aoa[j];
647: nextp = 0;
648: for (k=0; nextp<pnz; k++) {
649: if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
650: apa_sparse[k] += valtmp*pa[nextp++];
651: }
652: }
653: PetscLogFlops(2.0*pnz);
654: }
656: /* set values in C */
657: cdnz = cd->i[i+1] - cd->i[i];
658: conz = co->i[i+1] - co->i[i];
660: /* 1st off-diagonal part of C */
661: ca = coa + co->i[i];
662: k = 0;
663: for (k0=0; k0<conz; k0++) {
664: if (apJ[k] >= cstart) break;
665: ca[k0] = apa_sparse[k];
666: apa_sparse[k] = 0.0;
667: k++;
668: }
670: /* diagonal part of C */
671: ca = cda + cd->i[i];
672: for (k1=0; k1<cdnz; k1++) {
673: ca[k1] = apa_sparse[k];
674: apa_sparse[k] = 0.0;
675: k++;
676: }
678: /* 2nd off-diagonal part of C */
679: ca = coa + co->i[i];
680: for (; k0<conz; k0++) {
681: ca[k0] = apa_sparse[k];
682: apa_sparse[k] = 0.0;
683: k++;
684: }
685: }
686: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
687: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
688: return(0);
689: }
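/*
   Sketch of the sparse axpy used in the loops above (illustrative).  Because both apJ[]
   (columns of row i of AP from the symbolic phase) and pj[] (columns of a row of P) are
   sorted, the contribution valtmp*P[row,:] is merged with a single forward pointer
   instead of a dense length-pN accumulator:

     nextp = 0;
     for (k=0; nextp<pnz; k++) {
       if (apJ[k] == pj[nextp]) apa_sparse[k] += valtmp*pa[nextp++];
     }

   This merge is what lets the "scalable" variant avoid the O(pN) work array of the
   nonscalable routine above.
*/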
691: /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid O(BN) memory requirement */
692: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
693: {
694: PetscErrorCode ierr;
695: MPI_Comm comm;
696: PetscMPIInt size;
697: Mat Cmpi;
698: Mat_PtAPMPI *ptap;
699: PetscFreeSpaceList free_space = NULL,current_space=NULL;
700: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c;
701: Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
702: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
703: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
704: PetscInt i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max;
705: PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
706: PetscReal afill;
707: PetscScalar *apa;
708: PetscTable ta;
711: PetscObjectGetComm((PetscObject)A,&comm);
712: MPI_Comm_size(comm,&size);
714: /* create struct Mat_PtAPMPI and attach it to C later */
715: PetscNew(&ptap);
717: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
718: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
720: /* get P_loc by taking all local rows of P */
721: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
723: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
724: pi_loc = p_loc->i; pj_loc = p_loc->j;
725: if (size > 1) {
726: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
727: pi_oth = p_oth->i; pj_oth = p_oth->j;
728: } else {
729: p_oth = NULL;
730: pi_oth = NULL; pj_oth = NULL;
731: }
733: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
734: /*-------------------------------------------------------------------*/
735: PetscMalloc1(am+2,&api);
736: ptap->api = api;
737: api[0] = 0;
739: /* create a hash table used to compute apnz_max */
740: PetscTableCreate(pn,pN,&ta);
742: /* Calculate apnz_max */
743: apnz_max = 0;
744: for (i=0; i<am; i++) {
745: PetscTableRemoveAll(ta);
746: /* diagonal portion of A */
747: nzi = adi[i+1] - adi[i];
748: Jptr = adj+adi[i]; /* cols of A_diag */
749: MatMergeRows_SeqAIJ(p_loc,nzi,Jptr,ta);
750: PetscTableGetCount(ta,&apnz);
751: if (apnz_max < apnz) apnz_max = apnz;
753: /* off-diagonal portion of A */
754: nzi = aoi[i+1] - aoi[i];
755: Jptr = aoj+aoi[i]; /* cols of A_off */
756: MatMergeRows_SeqAIJ(p_oth,nzi,Jptr,ta);
757: PetscTableGetCount(ta,&apnz);
758: if (apnz_max < apnz) apnz_max = apnz;
759: }
760: PetscTableDestroy(&ta);
762: PetscLLCondensedCreate_Scalable(apnz_max,&lnk);
764: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
765: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);
766: current_space = free_space;
767: MatPreallocateInitialize(comm,am,pn,dnz,onz);
768: for (i=0; i<am; i++) {
769: /* diagonal portion of A */
770: nzi = adi[i+1] - adi[i];
771: for (j=0; j<nzi; j++) {
772: row = *adj++;
773: pnz = pi_loc[row+1] - pi_loc[row];
774: Jptr = pj_loc + pi_loc[row];
775: /* add non-zero cols of P into the sorted linked list lnk */
776: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
777: }
778: /* off-diagonal portion of A */
779: nzi = aoi[i+1] - aoi[i];
780: for (j=0; j<nzi; j++) {
781: row = *aoj++;
782: pnz = pi_oth[row+1] - pi_oth[row];
783: Jptr = pj_oth + pi_oth[row];
784: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
785: }
787: apnz = *lnk;
788: api[i+1] = api[i] + apnz;
790: /* if free space is not available, double the total space in the list */
791: if (current_space->local_remaining<apnz) {
792: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
793: nspacedouble++;
794: }
796: /* Copy data into free space, then initialize lnk */
797: PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);
798: MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);
800: current_space->array += apnz;
801: current_space->local_used += apnz;
802: current_space->local_remaining -= apnz;
803: }
805: /* Allocate space for apj, initialize apj, and */
806: /* destroy list of free space and other temporary array(s) */
807: PetscMalloc1(api[am]+1,&ptap->apj);
808: apj = ptap->apj;
809: PetscFreeSpaceContiguous(&free_space,ptap->apj);
810: PetscLLCondensedDestroy_Scalable(lnk);
812: /* create and assemble symbolic parallel matrix Cmpi */
813: /*----------------------------------------------------*/
814: MatCreate(comm,&Cmpi);
815: MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
816: MatSetBlockSizesFromMats(Cmpi,A,P);
817: MatSetType(Cmpi,MATMPIAIJ);
818: MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);
819: MatPreallocateFinalize(dnz,onz);
821: /* malloc apa for assembling Cmpi */
822: PetscCalloc1(apnz_max,&apa);
824: ptap->apa = apa;
825: for (i=0; i<am; i++) {
826: row = i + rstart;
827: apnz = api[i+1] - api[i];
828: MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);
829: apj += apnz;
830: }
831: MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);
832: MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);
834: ptap->destroy = Cmpi->ops->destroy;
835: ptap->duplicate = Cmpi->ops->duplicate;
836: Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
837: Cmpi->ops->destroy = MatDestroy_MPIAIJ_MatMatMult;
838: Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult;
840: /* attach the supporting struct to Cmpi for reuse */
841: c = (Mat_MPIAIJ*)Cmpi->data;
842: c->ptap = ptap;
844: *C = Cmpi;
846: /* set MatInfo */
847: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
848: if (afill < 1.0) afill = 1.0;
849: Cmpi->info.mallocs = nspacedouble;
850: Cmpi->info.fill_ratio_given = fill;
851: Cmpi->info.fill_ratio_needed = afill;
853: #if defined(PETSC_USE_INFO)
854: if (api[am]) {
855: PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
856: PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
857: } else {
858: PetscInfo(Cmpi,"Empty matrix product\n");
859: }
860: #endif
861: return(0);
862: }
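/*
   Run-time selection sketch: the two MPIAIJ*MPIAIJ symbolic paths in this file can be
   compared on a given problem through the option parsed in MatMatMult_MPIAIJ_MPIAIJ(),
   e.g.

     ./app -matmatmult_via scalable      (this routine, sparse accumulator)
     ./app -matmatmult_via nonscalable   (dense accumulator, the default)

   where ./app stands for any PETSc program that calls MatMatMult() on MPIAIJ matrices.
*/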
864: /*-------------------------------------------------------------------------*/
865: PetscErrorCode MatTransposeMatMult_MPIAIJ_MPIAIJ(Mat P,Mat A,MatReuse scall,PetscReal fill,Mat *C)
866: {
868: const char *algTypes[3] = {"scalable","nonscalable","matmatmult"};
869: PetscInt alg=0; /* set default algorithm */
872: if (scall == MAT_INITIAL_MATRIX) {
873: PetscObjectOptionsBegin((PetscObject)A);
874: PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
875: PetscOptionsEList("-mattransposematmult_via","Algorithmic approach","MatTransposeMatMult",algTypes,3,algTypes[0],&alg,NULL);
876: PetscOptionsEnd();
878: PetscLogEventBegin(MAT_TransposeMatMultSymbolic,P,A,0,0);
879: switch (alg) {
880: case 1:
881: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(P,A,fill,C);
882: break;
883: case 2:
884: {
885: Mat Pt;
886: Mat_PtAPMPI *ptap;
887: Mat_MPIAIJ *c;
888: MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);
889: MatMatMult(Pt,A,MAT_INITIAL_MATRIX,fill,C);
890: c = (Mat_MPIAIJ*)(*C)->data;
891: ptap = c->ptap;
892: ptap->Pt = Pt;
893: (*C)->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
894: return(0);
895: }
896: break;
897: default:
898: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(P,A,fill,C);
899: break;
900: }
901: PetscLogEventEnd(MAT_TransposeMatMultSymbolic,P,A,0,0);
902: }
903: PetscLogEventBegin(MAT_TransposeMatMultNumeric,P,A,0,0);
904: (*(*C)->ops->mattransposemultnumeric)(P,A,*C);
905: PetscLogEventEnd(MAT_TransposeMatMultNumeric,P,A,0,0);
906: return(0);
907: }
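/*
   Usage sketch (illustrative): this dispatcher is reached through MatTransposeMatMult()
   with assembled MATMPIAIJ matrices P and A:

     Mat C;
     MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);

   The symbolic algorithm is chosen with -mattransposematmult_via
   scalable|nonscalable|matmatmult, as parsed above; the matmatmult variant forms Pt
   explicitly and reuses the MatMatMult kernels.
*/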
909: /* This routine only works when scall=MAT_REUSE_MATRIX! */
910: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P,Mat A,Mat C)
911: {
913: Mat_MPIAIJ *c=(Mat_MPIAIJ*)C->data;
914: Mat_PtAPMPI *ptap= c->ptap;
915: Mat Pt=ptap->Pt;
918: MatTranspose(P,MAT_REUSE_MATRIX,&Pt);
919: MatMatMultNumeric(Pt,A,C);
920: return(0);
921: }
923: /* Non-scalable version, use dense axpy */
924: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,Mat C)
925: {
926: PetscErrorCode ierr;
927: Mat_Merge_SeqsToMPI *merge;
928: Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
929: Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
930: Mat_PtAPMPI *ptap;
931: PetscInt *adj,*aJ;
932: PetscInt i,j,k,anz,pnz,row,*cj;
933: MatScalar *ada,*aval,*ca,valtmp;
934: PetscInt am =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
935: MPI_Comm comm;
936: PetscMPIInt size,rank,taga,*len_s;
937: PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
938: PetscInt **buf_ri,**buf_rj;
939: PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
940: MPI_Request *s_waits,*r_waits;
941: MPI_Status *status;
942: MatScalar **abuf_r,*ba_i,*pA,*coa,*ba;
943: PetscInt *ai,*aj,*coi,*coj;
944: PetscInt *poJ,*pdJ;
945: Mat A_loc;
946: Mat_SeqAIJ *a_loc;
949: PetscObjectGetComm((PetscObject)C,&comm);
950: MPI_Comm_size(comm,&size);
951: MPI_Comm_rank(comm,&rank);
953: ptap = c->ptap;
954: merge = ptap->merge;
956: /* 2) compute numeric C_seq = P_loc^T*A_loc - dominating part */
957: /*--------------------------------------------------------------*/
958: /* get data from symbolic products */
959: coi = merge->coi; coj = merge->coj;
960: PetscCalloc1(coi[pon]+1,&coa);
962: bi = merge->bi; bj = merge->bj;
963: owners = merge->rowmap->range;
964: PetscCalloc1(bi[cm]+1,&ba);
966: /* get A_loc by taking all local rows of A */
967: A_loc = ptap->A_loc;
968: MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);
969: a_loc = (Mat_SeqAIJ*)(A_loc)->data;
970: ai = a_loc->i;
971: aj = a_loc->j;
973: PetscCalloc1(A->cmap->N,&aval); /* non-scalable!!! */
975: for (i=0; i<am; i++) {
976: /* 2-a) put A[i,:] to dense array aval */
977: anz = ai[i+1] - ai[i];
978: adj = aj + ai[i];
979: ada = a_loc->a + ai[i];
980: for (j=0; j<anz; j++) {
981: aval[adj[j]] = ada[j];
982: }
984: /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
985: /*--------------------------------------------------------------*/
986: /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
987: pnz = po->i[i+1] - po->i[i];
988: poJ = po->j + po->i[i];
989: pA = po->a + po->i[i];
990: for (j=0; j<pnz; j++) {
991: row = poJ[j];
992: cnz = coi[row+1] - coi[row];
993: cj = coj + coi[row];
994: ca = coa + coi[row];
995: /* perform dense axpy */
996: valtmp = pA[j];
997: for (k=0; k<cnz; k++) {
998: ca[k] += valtmp*aval[cj[k]];
999: }
1000: PetscLogFlops(2.0*cnz);
1001: }
1003: /* put the value into Cd (diagonal part) */
1004: pnz = pd->i[i+1] - pd->i[i];
1005: pdJ = pd->j + pd->i[i];
1006: pA = pd->a + pd->i[i];
1007: for (j=0; j<pnz; j++) {
1008: row = pdJ[j];
1009: cnz = bi[row+1] - bi[row];
1010: cj = bj + bi[row];
1011: ca = ba + bi[row];
1012: /* perform dense axpy */
1013: valtmp = pA[j];
1014: for (k=0; k<cnz; k++) {
1015: ca[k] += valtmp*aval[cj[k]];
1016: }
1017: PetscLogFlops(2.0*cnz);
1018: }
1020: /* zero out the dense copy of A[i,:] in aval */
1021: aJ = aj + ai[i];
1022: for (k=0; k<anz; k++) aval[aJ[k]] = 0.0;
1023: }
1025: /* 3) send and recv matrix values coa */
1026: /*------------------------------------*/
1027: buf_ri = merge->buf_ri;
1028: buf_rj = merge->buf_rj;
1029: len_s = merge->len_s;
1030: PetscCommGetNewTag(comm,&taga);
1031: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
1033: PetscMalloc2(merge->nsend+1,&s_waits,size,&status);
1034: for (proc=0,k=0; proc<size; proc++) {
1035: if (!len_s[proc]) continue;
1036: i = merge->owners_co[proc];
1037: MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
1038: k++;
1039: }
1040: if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
1041: if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
1043: PetscFree2(s_waits,status);
1044: PetscFree(r_waits);
1045: PetscFree(coa);
1047: /* 4) insert local Cseq and received values into Cmpi */
1048: /*----------------------------------------------------*/
1049: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
1050: for (k=0; k<merge->nrecv; k++) {
1051: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1052: nrows = *(buf_ri_k[k]);
1053: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th received i-structure */
1054: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1055: }
1057: for (i=0; i<cm; i++) {
1058: row = owners[rank] + i; /* global row index of C_seq */
1059: bj_i = bj + bi[i]; /* col indices of the i-th row of C */
1060: ba_i = ba + bi[i];
1061: bnz = bi[i+1] - bi[i];
1062: /* add received vals into ba */
1063: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1064: /* i-th row */
1065: if (i == *nextrow[k]) {
1066: cnz = *(nextci[k]+1) - *nextci[k];
1067: cj = buf_rj[k] + *(nextci[k]);
1068: ca = abuf_r[k] + *(nextci[k]);
1069: nextcj = 0;
1070: for (j=0; nextcj<cnz; j++) {
1071: if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1072: ba_i[j] += ca[nextcj++];
1073: }
1074: }
1075: nextrow[k]++; nextci[k]++;
1076: PetscLogFlops(2.0*cnz);
1077: }
1078: }
1079: MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);
1080: }
1081: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1082: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1084: PetscFree(ba);
1085: PetscFree(abuf_r[0]);
1086: PetscFree(abuf_r);
1087: PetscFree3(buf_ri_k,nextrow,nextci);
1088: PetscFree(aval);
1089: return(0);
1090: }
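/*
   Sketch of the outer-product accumulation used above (illustrative pseudocode).  For
   each local row i, A[i,:] is first expanded into the dense work array aval[] of length
   A->cmap->N, and every stored entry P[i,j] then adds P[i,j]*A[i,:] into row j of P^T*A:

     for (j=0; j<pnz; j++) {
       valtmp = pA[j];
       for (k=0; k<cnz; k++) ca[k] += valtmp*aval[cj[k]];
     }

   The dense aval[] is what makes this variant non-scalable in the number of global
   columns of A; the scalable numeric routine below replaces it with a sorted sparse merge.
*/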
1092: PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat, MatDuplicateOption,Mat*);
1093: /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
1094: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,PetscReal fill,Mat *C)
1095: {
1096: PetscErrorCode ierr;
1097: Mat Cmpi,A_loc,POt,PDt;
1098: Mat_PtAPMPI *ptap;
1099: PetscFreeSpaceList free_space=NULL,current_space=NULL;
1100: Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data,*c;
1101: PetscInt *pdti,*pdtj,*poti,*potj,*ptJ;
1102: PetscInt nnz;
1103: PetscInt *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
1104: PetscInt am=A->rmap->n,pn=P->cmap->n;
1105: PetscBT lnkbt;
1106: MPI_Comm comm;
1107: PetscMPIInt size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
1108: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
1109: PetscInt len,proc,*dnz,*onz,*owners;
1110: PetscInt nzi,*bi,*bj;
1111: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1112: MPI_Request *swaits,*rwaits;
1113: MPI_Status *sstatus,rstatus;
1114: Mat_Merge_SeqsToMPI *merge;
1115: PetscInt *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
1116: PetscReal afill =1.0,afill_tmp;
1117: PetscInt rstart = P->cmap->rstart,rmax,aN=A->cmap->N;
1118: PetscScalar *vals;
1119: Mat_SeqAIJ *a_loc, *pdt,*pot;
1122: PetscObjectGetComm((PetscObject)A,&comm);
1123: /* check if matrix local sizes are compatible */
1124: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
1126: MPI_Comm_size(comm,&size);
1127: MPI_Comm_rank(comm,&rank);
1129: /* create struct Mat_PtAPMPI and attach it to C later */
1130: PetscNew(&ptap);
1132: /* get A_loc by taking all local rows of A */
1133: MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);
1135: ptap->A_loc = A_loc;
1137: a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1138: ai = a_loc->i;
1139: aj = a_loc->j;
1141: /* determine symbolic Co=(p->B)^T*A - send to others */
1142: /*----------------------------------------------------*/
1143: MatTransposeSymbolic_SeqAIJ(p->A,&PDt);
1144: pdt = (Mat_SeqAIJ*)PDt->data;
1145: pdti = pdt->i; pdtj = pdt->j;
1147: MatTransposeSymbolic_SeqAIJ(p->B,&POt);
1148: pot = (Mat_SeqAIJ*)POt->data;
1149: poti = pot->i; potj = pot->j;
1151: /* then, compute symbolic Co = (p->B)^T*A */
1152: pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors >= (num of nonzero rows of C_seq) - pn */
1153: PetscMalloc1(pon+1,&coi);
1154: coi[0] = 0;
1156: /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1157: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],ai[am]));
1158: PetscFreeSpaceGet(nnz,&free_space);
1159: current_space = free_space;
1161: /* create and initialize a linked list */
1162: PetscLLCondensedCreate(aN,aN,&lnk,&lnkbt);
1164: for (i=0; i<pon; i++) {
1165: pnz = poti[i+1] - poti[i];
1166: ptJ = potj + poti[i];
1167: for (j=0; j<pnz; j++) {
1168: row = ptJ[j]; /* row of A_loc == col of Pot */
1169: anz = ai[row+1] - ai[row];
1170: Jptr = aj + ai[row];
1171: /* add non-zero cols of AP into the sorted linked list lnk */
1172: PetscLLCondensedAddSorted(anz,Jptr,lnk,lnkbt);
1173: }
1174: nnz = lnk[0];
1176: /* If free space is not available, double the total space in the list */
1177: if (current_space->local_remaining<nnz) {
1178: PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
1179: nspacedouble++;
1180: }
1182: /* Copy data into free space, and zero out dense rows */
1183: PetscLLCondensedClean(aN,nnz,current_space->array,lnk,lnkbt);
1185: current_space->array += nnz;
1186: current_space->local_used += nnz;
1187: current_space->local_remaining -= nnz;
1189: coi[i+1] = coi[i] + nnz;
1190: }
1192: PetscMalloc1(coi[pon]+1,&coj);
1193: PetscFreeSpaceContiguous(&free_space,coj);
1195: afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
1196: if (afill_tmp > afill) afill = afill_tmp;
1198: /* send j-array (coj) of Co to other processors */
1199: /*----------------------------------------------*/
1200: /* determine row ownership */
1201: PetscNew(&merge);
1202: PetscLayoutCreate(comm,&merge->rowmap);
1204: merge->rowmap->n = pn;
1205: merge->rowmap->bs = 1;
1207: PetscLayoutSetUp(merge->rowmap);
1208: owners = merge->rowmap->range;
1210: /* determine the number of messages to send, their lengths */
1211: PetscCalloc1(size,&len_si);
1212: PetscMalloc1(size,&merge->len_s);
1214: len_s = merge->len_s;
1215: merge->nsend = 0;
1217: PetscMalloc1(size+2,&owners_co);
1218: PetscMemzero(len_s,size*sizeof(PetscMPIInt));
1220: proc = 0;
1221: for (i=0; i<pon; i++) {
1222: while (prmap[i] >= owners[proc+1]) proc++;
1223: len_si[proc]++; /* num of rows in Co to be sent to [proc] */
1224: len_s[proc] += coi[i+1] - coi[i];
1225: }
1227: len = 0; /* max length of buf_si[] */
1228: owners_co[0] = 0;
1229: for (proc=0; proc<size; proc++) {
1230: owners_co[proc+1] = owners_co[proc] + len_si[proc];
1231: if (len_si[proc]) {
1232: merge->nsend++;
1233: len_si[proc] = 2*(len_si[proc] + 1);
1234: len += len_si[proc];
1235: }
1236: }
1238: /* determine the number and length of messages to receive for coi and coj */
1239: PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
1240: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
1242: /* post the Irecv and Isend of coj */
1243: PetscCommGetNewTag(comm,&tagj);
1244: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);
1245: PetscMalloc1(merge->nsend+1,&swaits);
1246: for (proc=0, k=0; proc<size; proc++) {
1247: if (!len_s[proc]) continue;
1248: i = owners_co[proc];
1249: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
1250: k++;
1251: }
1253: /* receives and sends of coj are complete */
1254: PetscMalloc1(size,&sstatus);
1255: for (i=0; i<merge->nrecv; i++) {
1256: PetscMPIInt icompleted;
1257: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
1258: }
1259: PetscFree(rwaits);
1260: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
1262: /* send and recv coi */
1263: /*-------------------*/
1264: PetscCommGetNewTag(comm,&tagi);
1265: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);
1266: PetscMalloc1(len+1,&buf_s);
1267: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1268: for (proc=0,k=0; proc<size; proc++) {
1269: if (!len_s[proc]) continue;
1270: /* form outgoing message for i-structure:
1271: buf_si[0]: nrows to be sent
1272: [1:nrows]: row index (global)
1273: [nrows+1:2*nrows+1]: i-structure index
1274: */
1275: /*-------------------------------------------*/
1276: nrows = len_si[proc]/2 - 1;
1277: buf_si_i = buf_si + nrows+1;
1278: buf_si[0] = nrows;
1279: buf_si_i[0] = 0;
1280: nrows = 0;
1281: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1282: nzi = coi[i+1] - coi[i];
1283: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
1284: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
1285: nrows++;
1286: }
1287: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
1288: k++;
1289: buf_si += len_si[proc];
1290: }
1291: i = merge->nrecv;
1292: while (i--) {
1293: PetscMPIInt icompleted;
1294: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
1295: }
1296: PetscFree(rwaits);
1297: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
1298: PetscFree(len_si);
1299: PetscFree(len_ri);
1300: PetscFree(swaits);
1301: PetscFree(sstatus);
1302: PetscFree(buf_s);
1304: /* compute the local portion of C (mpi mat) */
1305: /*------------------------------------------*/
1306: /* allocate bi array and free space for accumulating nonzero column info */
1307: PetscMalloc1(pn+1,&bi);
1308: bi[0] = 0;
1310: /* set initial free space to be fill*(nnz(P) + nnz(A)) */
1311: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pdti[pn],PetscIntSumTruncate(poti[pon],ai[am])));
1312: PetscFreeSpaceGet(nnz,&free_space);
1313: current_space = free_space;
1315: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
1316: for (k=0; k<merge->nrecv; k++) {
1317: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1318: nrows = *buf_ri_k[k];
1319: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th received i-structure */
1320: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1321: }
1323: MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);
1324: rmax = 0;
1325: for (i=0; i<pn; i++) {
1326: /* add pdt[i,:]*AP into lnk */
1327: pnz = pdti[i+1] - pdti[i];
1328: ptJ = pdtj + pdti[i];
1329: for (j=0; j<pnz; j++) {
1330: row = ptJ[j]; /* row of AP == col of Pt */
1331: anz = ai[row+1] - ai[row];
1332: Jptr = aj + ai[row];
1333: /* add non-zero cols of AP into the sorted linked list lnk */
1334: PetscLLCondensedAddSorted(anz,Jptr,lnk,lnkbt);
1335: }
1337: /* add received col data into lnk */
1338: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1339: if (i == *nextrow[k]) { /* i-th row */
1340: nzi = *(nextci[k]+1) - *nextci[k];
1341: Jptr = buf_rj[k] + *nextci[k];
1342: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
1343: nextrow[k]++; nextci[k]++;
1344: }
1345: }
1346: nnz = lnk[0];
1348: /* if free space is not available, make more free space */
1349: if (current_space->local_remaining<nnz) {
1350: PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
1351: nspacedouble++;
1352: }
1353: /* copy data into free space, then initialize lnk */
1354: PetscLLCondensedClean(aN,nnz,current_space->array,lnk,lnkbt);
1355: MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);
1357: current_space->array += nnz;
1358: current_space->local_used += nnz;
1359: current_space->local_remaining -= nnz;
1361: bi[i+1] = bi[i] + nnz;
1362: if (nnz > rmax) rmax = nnz;
1363: }
1364: PetscFree3(buf_ri_k,nextrow,nextci);
1366: PetscMalloc1(bi[pn]+1,&bj);
1367: PetscFreeSpaceContiguous(&free_space,bj);
1369: afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
1370: if (afill_tmp > afill) afill = afill_tmp;
1371: PetscLLCondensedDestroy(lnk,lnkbt);
1372: MatDestroy(&POt);
1373: MatDestroy(&PDt);
1375: /* create symbolic parallel matrix Cmpi - why can it not be assembled in the Numeric part? */
1376: /*----------------------------------------------------------------------------------*/
1377: PetscCalloc1(rmax+1,&vals);
1379: MatCreate(comm,&Cmpi);
1380: MatSetSizes(Cmpi,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);
1381: MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(A->cmap->bs));
1382: MatSetType(Cmpi,MATMPIAIJ);
1383: MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);
1384: MatPreallocateFinalize(dnz,onz);
1385: MatSetBlockSize(Cmpi,1);
1386: for (i=0; i<pn; i++) {
1387: row = i + rstart;
1388: nnz = bi[i+1] - bi[i];
1389: Jptr = bj + bi[i];
1390: MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);
1391: }
1392: MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);
1393: MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);
1394: PetscFree(vals);
1396: merge->bi = bi;
1397: merge->bj = bj;
1398: merge->coi = coi;
1399: merge->coj = coj;
1400: merge->buf_ri = buf_ri;
1401: merge->buf_rj = buf_rj;
1402: merge->owners_co = owners_co;
1404: /* attach the supporting struct to Cmpi for reuse */
1405: c = (Mat_MPIAIJ*)Cmpi->data;
1406: c->ptap = ptap;
1407: ptap->api = NULL;
1408: ptap->apj = NULL;
1409: ptap->merge = merge;
1410: ptap->destroy = Cmpi->ops->destroy;
1411: ptap->duplicate = Cmpi->ops->duplicate;
1413: Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1414: Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
1415: Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;
1417: *C = Cmpi;
1418: #if defined(PETSC_USE_INFO)
1419: if (bi[pn] != 0) {
1420: PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
1421: PetscInfo1(Cmpi,"Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
1422: } else {
1423: PetscInfo(Cmpi,"Empty matrix product\n");
1424: }
1425: #endif
1426: return(0);
1427: }
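/*
   Sketch of decoding one received i-structure message (its layout is documented in the
   send loop above; illustrative, for received message k):

     nrows   = buf_ri[k][0];
     rows    = buf_ri[k] + 1;
     offsets = buf_ri[k] + nrows + 1;

   Here nrows is the number of rows carried by the message, rows[] holds their local row
   indices, and offsets[] gives CSR-style offsets into the matching buf_rj[k]/abuf_r[k]
   arrays; this is exactly how buf_ri_k, nextrow and nextci walk the messages in the
   numeric routines.
*/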
1429: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C)
1430: {
1431: PetscErrorCode ierr;
1432: Mat_Merge_SeqsToMPI *merge;
1433: Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
1434: Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
1435: Mat_PtAPMPI *ptap;
1436: PetscInt *adj;
1437: PetscInt i,j,k,anz,pnz,row,*cj,nexta;
1438: MatScalar *ada,*ca,valtmp;
1439: PetscInt am =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
1440: MPI_Comm comm;
1441: PetscMPIInt size,rank,taga,*len_s;
1442: PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
1443: PetscInt **buf_ri,**buf_rj;
1444: PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
1445: MPI_Request *s_waits,*r_waits;
1446: MPI_Status *status;
1447: MatScalar **abuf_r,*ba_i,*pA,*coa,*ba;
1448: PetscInt *ai,*aj,*coi,*coj;
1449: PetscInt *poJ,*pdJ;
1450: Mat A_loc;
1451: Mat_SeqAIJ *a_loc;
1454: PetscObjectGetComm((PetscObject)C,&comm);
1455: MPI_Comm_size(comm,&size);
1456: MPI_Comm_rank(comm,&rank);
1458: ptap = c->ptap;
1459: merge = ptap->merge;
1461: /* 2) compute numeric C_seq = P_loc^T*A_loc */
1462: /*------------------------------------------*/
1463: /* get data from symbolic products */
1464: coi = merge->coi; coj = merge->coj;
1465: PetscCalloc1(coi[pon]+1,&coa);
1466: bi = merge->bi; bj = merge->bj;
1467: owners = merge->rowmap->range;
1468: PetscCalloc1(bi[cm]+1,&ba);
1470: /* get A_loc by taking all local rows of A */
1471: A_loc = ptap->A_loc;
1472: MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);
1473: a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1474: ai = a_loc->i;
1475: aj = a_loc->j;
1477: for (i=0; i<am; i++) {
1478: anz = ai[i+1] - ai[i];
1479: adj = aj + ai[i];
1480: ada = a_loc->a + ai[i];
1482: /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
1483: /*-------------------------------------------------------------*/
1484: /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
1485: pnz = po->i[i+1] - po->i[i];
1486: poJ = po->j + po->i[i];
1487: pA = po->a + po->i[i];
1488: for (j=0; j<pnz; j++) {
1489: row = poJ[j];
1490: cj = coj + coi[row];
1491: ca = coa + coi[row];
1492: /* perform sparse axpy */
1493: nexta = 0;
1494: valtmp = pA[j];
1495: for (k=0; nexta<anz; k++) {
1496: if (cj[k] == adj[nexta]) {
1497: ca[k] += valtmp*ada[nexta];
1498: nexta++;
1499: }
1500: }
1501: PetscLogFlops(2.0*anz);
1502: }
1504: /* put the value into Cd (diagonal part) */
1505: pnz = pd->i[i+1] - pd->i[i];
1506: pdJ = pd->j + pd->i[i];
1507: pA = pd->a + pd->i[i];
1508: for (j=0; j<pnz; j++) {
1509: row = pdJ[j];
1510: cj = bj + bi[row];
1511: ca = ba + bi[row];
1512: /* perform sparse axpy */
1513: nexta = 0;
1514: valtmp = pA[j];
1515: for (k=0; nexta<anz; k++) {
1516: if (cj[k] == adj[nexta]) {
1517: ca[k] += valtmp*ada[nexta];
1518: nexta++;
1519: }
1520: }
1521: PetscLogFlops(2.0*anz);
1522: }
1523: }
1525: /* 3) send and recv matrix values coa */
1526: /*------------------------------------*/
1527: buf_ri = merge->buf_ri;
1528: buf_rj = merge->buf_rj;
1529: len_s = merge->len_s;
1530: PetscCommGetNewTag(comm,&taga);
1531: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
1533: PetscMalloc2(merge->nsend+1,&s_waits,size,&status);
1534: for (proc=0,k=0; proc<size; proc++) {
1535: if (!len_s[proc]) continue;
1536: i = merge->owners_co[proc];
1537: MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
1538: k++;
1539: }
1540: if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
1541: if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
1543: PetscFree2(s_waits,status);
1544: PetscFree(r_waits);
1545: PetscFree(coa);
1547: /* 4) insert local Cseq and received values into Cmpi */
1548: /*----------------------------------------------------*/
1549: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
1550: for (k=0; k<merge->nrecv; k++) {
1551: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1552: nrows = *(buf_ri_k[k]);
1553: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th received i-structure */
1554: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1555: }
1557: for (i=0; i<cm; i++) {
1558: row = owners[rank] + i; /* global row index of C_seq */
1559: bj_i = bj + bi[i]; /* col indices of the i-th row of C */
1560: ba_i = ba + bi[i];
1561: bnz = bi[i+1] - bi[i];
1562: /* add received vals into ba */
1563: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1564: /* i-th row */
1565: if (i == *nextrow[k]) {
1566: cnz = *(nextci[k]+1) - *nextci[k];
1567: cj = buf_rj[k] + *(nextci[k]);
1568: ca = abuf_r[k] + *(nextci[k]);
1569: nextcj = 0;
1570: for (j=0; nextcj<cnz; j++) {
1571: if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1572: ba_i[j] += ca[nextcj++];
1573: }
1574: }
1575: nextrow[k]++; nextci[k]++;
1576: PetscLogFlops(2.0*cnz);
1577: }
1578: }
1579: MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);
1580: }
1581: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1582: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1584: PetscFree(ba);
1585: PetscFree(abuf_r[0]);
1586: PetscFree(abuf_r);
1587: PetscFree3(buf_ri_k,nextrow,nextci);
1588: return(0);
1589: }
1591: /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ();
1592:    it differs from MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable() in using LLCondensedCreate_Scalable() */
1593: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat *C)
1594: {
1595: PetscErrorCode ierr;
1596: Mat Cmpi,A_loc,POt,PDt;
1597: Mat_PtAPMPI *ptap;
1598: PetscFreeSpaceList free_space=NULL,current_space=NULL;
1599: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data,*a=(Mat_MPIAIJ*)A->data,*c;
1600: PetscInt *pdti,*pdtj,*poti,*potj,*ptJ;
1601: PetscInt nnz;
1602: PetscInt *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
1603: PetscInt am =A->rmap->n,pn=P->cmap->n;
1604: MPI_Comm comm;
1605: PetscMPIInt size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
1606: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
1607: PetscInt len,proc,*dnz,*onz,*owners;
1608: PetscInt nzi,*bi,*bj;
1609: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1610: MPI_Request *swaits,*rwaits;
1611: MPI_Status *sstatus,rstatus;
1612: Mat_Merge_SeqsToMPI *merge;
1613: PetscInt *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
1614: PetscReal afill =1.0,afill_tmp;
1615: PetscInt rstart = P->cmap->rstart,rmax,aN=A->cmap->N,Armax;
1616: PetscScalar *vals;
1617: Mat_SeqAIJ *a_loc,*pdt,*pot;
1618: PetscTable ta;
1621: PetscObjectGetComm((PetscObject)A,&comm);
1622: /* check if matrix local sizes are compatible */
1623: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
1625: MPI_Comm_size(comm,&size);
1626: MPI_Comm_rank(comm,&rank);
1629: /* create struct Mat_PtAPMPI and attach it to C later */
1629: PetscNew(&ptap);
1631: /* get A_loc by taking all local rows of A */
1632: MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);
1634: ptap->A_loc = A_loc;
1635: a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1636: ai = a_loc->i;
1637: aj = a_loc->j;
1639: /* determine symbolic Co=(p->B)^T*A - send to others */
1640: /*----------------------------------------------------*/
1641: MatTransposeSymbolic_SeqAIJ(p->A,&PDt);
1642: pdt = (Mat_SeqAIJ*)PDt->data;
1643: pdti = pdt->i; pdtj = pdt->j;
1645: MatTransposeSymbolic_SeqAIJ(p->B,&POt);
1646: pot = (Mat_SeqAIJ*)POt->data;
1647: poti = pot->i; potj = pot->j;
1649: /* then, compute symbolic Co = (p->B)^T*A */
1650: pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
1651: >= (num of nonzero rows of C_seq) - pn */
1652: PetscMalloc1(pon+1,&coi);
1653: coi[0] = 0;
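  /* coi/coj will hold the CSR structure of Co = (p->B)^T * A_loc; local row i of Co
     corresponds to global row prmap[i] of C, which is owned by another process, so its
     structure is sent there rather than kept here. */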
1655: /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1656: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],ai[am]));
1657: PetscFreeSpaceGet(nnz,&free_space);
1658: current_space = free_space;
1660: /* create and initialize a linked list */
1661: PetscTableCreate(A->cmap->n + a->B->cmap->N,aN,&ta);
1662: MatRowMergeMax_SeqAIJ(a_loc,am,ta);
1663: PetscTableGetCount(ta,&Armax);
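  /* Armax is the number of distinct column indices recorded in the table ta; it serves
     as an upper bound on the length of any merged row and is used to size the condensed
     linked list created next. */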
1665: PetscLLCondensedCreate_Scalable(Armax,&lnk);
1667: for (i=0; i<pon; i++) {
1668: pnz = poti[i+1] - poti[i];
1669: ptJ = potj + poti[i];
1670: for (j=0; j<pnz; j++) {
1671: row = ptJ[j]; /* row of A_loc == col of Pot */
1672: anz = ai[row+1] - ai[row];
1673: Jptr = aj + ai[row];
1674:       /* add nonzero cols of A_loc[row,:] into the sorted linked list lnk */
1675: PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);
1676: }
1677: nnz = lnk[0];
1679: /* If free space is not available, double the total space in the list */
1680: if (current_space->local_remaining<nnz) {
1681:       PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
1682: nspacedouble++;
1683: }
1685:     /* copy data into free space, then initialize lnk */
1686: PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);
1688: current_space->array += nnz;
1689: current_space->local_used += nnz;
1690: current_space->local_remaining -= nnz;
1692: coi[i+1] = coi[i] + nnz;
1693: }
1695: PetscMalloc1(coi[pon]+1,&coj);
1696: PetscFreeSpaceContiguous(&free_space,coj);
1697: PetscLLCondensedDestroy_Scalable(lnk); /* must destroy to get a new one for C */
1699: afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
1700: if (afill_tmp > afill) afill = afill_tmp;
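  /* afill tracks the largest fill ratio seen so far, i.e. the nonzeros of the product
     piece relative to the nonzeros of the inputs used to build it; it is reported at
     the end as the suggested 'fill' argument for future symbolic calls. */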
1702: /* send j-array (coj) of Co to other processors */
1703: /*----------------------------------------------*/
1704: /* determine row ownership */
1705: PetscNew(&merge);
1706: PetscLayoutCreate(comm,&merge->rowmap);
1708: merge->rowmap->n = pn;
1709: merge->rowmap->bs = 1;
1711: PetscLayoutSetUp(merge->rowmap);
1712: owners = merge->rowmap->range;
1714: /* determine the number of messages to send, their lengths */
1715: PetscCalloc1(size,&len_si);
1716: PetscMalloc1(size,&merge->len_s);
1718: len_s = merge->len_s;
1719: merge->nsend = 0;
1721: PetscMalloc1(size+2,&owners_co);
1722: PetscMemzero(len_s,size*sizeof(PetscMPIInt));
1724: proc = 0;
1725: for (i=0; i<pon; i++) {
1726: while (prmap[i] >= owners[proc+1]) proc++;
1727: len_si[proc]++; /* num of rows in Co to be sent to [proc] */
1728: len_s[proc] += coi[i+1] - coi[i];
1729: }
1731: len = 0; /* max length of buf_si[] */
1732: owners_co[0] = 0;
1733: for (proc=0; proc<size; proc++) {
1734: owners_co[proc+1] = owners_co[proc] + len_si[proc];
1735: if (len_si[proc]) {
1736: merge->nsend++;
1737: len_si[proc] = 2*(len_si[proc] + 1);
1738: len += len_si[proc];
1739: }
1740: }
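  /* Each i-structure message to [proc] carries 2*(nrows+1) integers: one count (nrows),
     nrows row indices, and nrows+1 row offsets; 'len' accumulates the total size of
     buf_s, the staging buffer for all outgoing i-structures. */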
1742: /* determine the number and length of messages to receive for coi and coj */
1743: PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
1744: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
1746: /* post the Irecv and Isend of coj */
1747: PetscCommGetNewTag(comm,&tagj);
1748: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);
1749: PetscMalloc1(merge->nsend+1,&swaits);
1750: for (proc=0, k=0; proc<size; proc++) {
1751: if (!len_s[proc]) continue;
1752: i = owners_co[proc];
1753: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
1754: k++;
1755: }
1757: /* receives and sends of coj are complete */
1758: PetscMalloc1(size,&sstatus);
1759: for (i=0; i<merge->nrecv; i++) {
1760: PetscMPIInt icompleted;
1761: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
1762: }
1763: PetscFree(rwaits);
1764: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
1766: /* add received column indices into table to update Armax */
1767: /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/examples/tutorials/ex56.c! */
1768: for (k=0; k<merge->nrecv; k++) {/* k-th received message */
1769: Jptr = buf_rj[k];
1770: for (j=0; j<merge->len_r[k]; j++) {
1771: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
1772: }
1773: }
1774: PetscTableGetCount(ta,&Armax);
1775: /* printf("Armax %d, an %d + Bn %d = %d, aN %d\n",Armax,A->cmap->n,a->B->cmap->N,A->cmap->n+a->B->cmap->N,aN); */
1777: /* send and recv coi */
1778: /*-------------------*/
1779: PetscCommGetNewTag(comm,&tagi);
1780: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);
1781: PetscMalloc1(len+1,&buf_s);
1782: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1783: for (proc=0,k=0; proc<size; proc++) {
1784: if (!len_s[proc]) continue;
1785: /* form outgoing message for i-structure:
1786: buf_si[0]: nrows to be sent
1787:           [1:nrows]: row index (local to the receiving process)
1788: [nrows+1:2*nrows+1]: i-structure index
1789: */
1790: /*-------------------------------------------*/
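    /* Illustrative layout (hypothetical numbers): if two rows with local indices 3 and 7
       on the receiving process are sent, with 4 and 2 nonzeros respectively, then
           buf_si = { 2, 3, 7, 0, 4, 6 }
       i.e. nrows = 2, the two local row indices, then the nrows+1 i-structure offsets. */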
1791: nrows = len_si[proc]/2 - 1;
1792: buf_si_i = buf_si + nrows+1;
1793: buf_si[0] = nrows;
1794: buf_si_i[0] = 0;
1795: nrows = 0;
1796: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1797: nzi = coi[i+1] - coi[i];
1798: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
1799: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
1800: nrows++;
1801: }
1802: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
1803: k++;
1804: buf_si += len_si[proc];
1805: }
1806: i = merge->nrecv;
1807: while (i--) {
1808: PetscMPIInt icompleted;
1809: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
1810: }
1811: PetscFree(rwaits);
1812: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
1813: PetscFree(len_si);
1814: PetscFree(len_ri);
1815: PetscFree(swaits);
1816: PetscFree(sstatus);
1817: PetscFree(buf_s);
1819: /* compute the local portion of C (mpi mat) */
1820: /*------------------------------------------*/
1821: /* allocate bi array and free space for accumulating nonzero column info */
1822: PetscMalloc1(pn+1,&bi);
1823: bi[0] = 0;
1825:   /* set initial free space to be fill*(nnz(P) + nnz(A)) */
1826: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pdti[pn],PetscIntSumTruncate(poti[pon],ai[am])));
1827: PetscFreeSpaceGet(nnz,&free_space);
1828: current_space = free_space;
1830: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
1831: for (k=0; k<merge->nrecv; k++) {
1832:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1833: nrows = *buf_ri_k[k];
1834:     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
1835:     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of the k-th received message */
1836: }
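  /* nextrow[k]/nextci[k] walk through the k-th received i-structure as the local rows
     are processed in order: whenever the current local row i matches *nextrow[k], the
     corresponding received column block (buf_rj[k] at offset *nextci[k]) is merged in. */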
1838: PetscLLCondensedCreate_Scalable(Armax,&lnk);
1839: MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);
1840: rmax = 0;
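  /* The loop below builds the row structure of the locally owned part of C: each row
     merges Pd^T * A_loc (via pdti/pdtj and rows of A_loc) with the received Co rows,
     records the row length in bi, feeds dnz/onz (diagonal/off-diagonal nonzero counts)
     to the preallocation macros, and tracks rmax, the length of the longest row. */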
1841: for (i=0; i<pn; i++) {
1842: /* add pdt[i,:]*AP into lnk */
1843: pnz = pdti[i+1] - pdti[i];
1844: ptJ = pdtj + pdti[i];
1845: for (j=0; j<pnz; j++) {
1846:       row  = ptJ[j];  /* row of A_loc == col of Pdt */
1847: anz = ai[row+1] - ai[row];
1848: Jptr = aj + ai[row];
1849:       /* add nonzero cols of A_loc[row,:] into the sorted linked list lnk */
1850: PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);
1851: }
1853: /* add received col data into lnk */
1854: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1855: if (i == *nextrow[k]) { /* i-th row */
1856: nzi = *(nextci[k]+1) - *nextci[k];
1857: Jptr = buf_rj[k] + *nextci[k];
1858: PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);
1859: nextrow[k]++; nextci[k]++;
1860: }
1861: }
1862: nnz = lnk[0];
1864: /* if free space is not available, make more free space */
1865: if (current_space->local_remaining<nnz) {
1866:       PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
1867: nspacedouble++;
1868: }
1869: /* copy data into free space, then initialize lnk */
1870: PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);
1871: MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);
1873: current_space->array += nnz;
1874: current_space->local_used += nnz;
1875: current_space->local_remaining -= nnz;
1877: bi[i+1] = bi[i] + nnz;
1878: if (nnz > rmax) rmax = nnz;
1879: }
1880: PetscFree3(buf_ri_k,nextrow,nextci);
1882: PetscMalloc1(bi[pn]+1,&bj);
1883: PetscFreeSpaceContiguous(&free_space,bj);
1884: afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
1885: if (afill_tmp > afill) afill = afill_tmp;
1886: PetscLLCondensedDestroy_Scalable(lnk);
1887: PetscTableDestroy(&ta);
1889: MatDestroy(&POt);
1890: MatDestroy(&PDt);
1892:   /* create symbolic parallel matrix Cmpi - why can it not be assembled in the numeric part? */
1893: /*----------------------------------------------------------------------------------*/
1894: PetscCalloc1(rmax+1,&vals);
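  /* vals is a zero-initialized scratch array (rmax = longest row) used to insert
     explicit zeros below: only the nonzero pattern of Cmpi is established in this
     symbolic phase; the actual values are filled in by the numeric routine. */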
1896: MatCreate(comm,&Cmpi);
1897: MatSetSizes(Cmpi,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);
1898: MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(A->cmap->bs));
1899: MatSetType(Cmpi,MATMPIAIJ);
1900: MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);
1901: MatPreallocateFinalize(dnz,onz);
1902: MatSetBlockSize(Cmpi,1);
1903: for (i=0; i<pn; i++) {
1904: row = i + rstart;
1905: nnz = bi[i+1] - bi[i];
1906: Jptr = bj + bi[i];
1907: MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);
1908: }
1909: MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);
1910: MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);
1911: PetscFree(vals);
1913: merge->bi = bi;
1914: merge->bj = bj;
1915: merge->coi = coi;
1916: merge->coj = coj;
1917: merge->buf_ri = buf_ri;
1918: merge->buf_rj = buf_rj;
1919: merge->owners_co = owners_co;
1921: /* attach the supporting struct to Cmpi for reuse */
1922: c = (Mat_MPIAIJ*)Cmpi->data;
1924: c->ptap = ptap;
1925: ptap->api = NULL;
1926: ptap->apj = NULL;
1927: ptap->merge = merge;
1928: ptap->apa = NULL;
1929: ptap->destroy = Cmpi->ops->destroy;
1930: ptap->duplicate = Cmpi->ops->duplicate;
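  /* The merge/communication data is stashed in ptap so that
     MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ can reuse it on repeated products;
     destroy/duplicate are overridden below so this extra structure is handled
     correctly when Cmpi is destroyed or duplicated. */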
1932: Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;
1933: Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
1934: Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;
1936: *C = Cmpi;
1937: #if defined(PETSC_USE_INFO)
1938: if (bi[pn] != 0) {
1939: PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
1940: PetscInfo1(Cmpi,"Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
1941: } else {
1942: PetscInfo(Cmpi,"Empty matrix product\n");
1943: }
1944: #endif
1945: return(0);
1946: }
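/* Illustration only (not part of the PETSc source): a minimal sketch of how the routines
   above are reached through the public MatTransposeMatMult() interface, assuming P and A
   are assembled MATMPIAIJ matrices with compatible layouts. The function name is
   hypothetical and the block is guarded by #if 0 so it is never compiled. */
#if 0
static PetscErrorCode ExampleTransposeMatMult(Mat P,Mat A)
{
  PetscErrorCode ierr;
  Mat            C;

  PetscFunctionBegin;
  /* symbolic + first numeric product; 2.0 is an (assumed) fill estimate */
  ierr = MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);
  /* ... change values (but not the nonzero structure) of P or A ... */
  /* numeric phase only, reusing the merge/communication data attached to C */
  ierr = MatTransposeMatMult(P,A,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
  ierr = MatDestroy(&C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif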