Actual source code: mpimatmatmult.c
petsc-3.14.1 2020-11-03
2: /*
3: Defines matrix-matrix product routines for pairs of MPIAIJ matrices
4: C = A * B
5: */
6: #include <../src/mat/impls/aij/seq/aij.h>
7: #include <../src/mat/utils/freespace.h>
8: #include <../src/mat/impls/aij/mpi/mpiaij.h>
9: #include <petscbt.h>
10: #include <../src/mat/impls/dense/mpi/mpidense.h>
11: #include <petsc/private/vecimpl.h>
12: #include <petsc/private/vecscatterimpl.h>
14: #if defined(PETSC_HAVE_HYPRE)
15: PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat);
16: #endif
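/*
   MatProductSymbolic_AB_MPIAIJ_MPIAIJ() below dispatches on the algorithm
   name stored in C->product->alg: "scalable" builds the symbolic product
   with a growable condensed linked list, "nonscalable" uses a dense row
   accumulator of length P->cmap->N, "seqmpi" splits the product into three
   sequential multiplications, and "hypre" (when available) delegates to the
   hypre library.
*/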
18: PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat C)
19: {
20: PetscErrorCode ierr;
21: Mat_Product *product = C->product;
22: Mat A=product->A,B=product->B;
23: MatProductAlgorithm alg=product->alg;
24: PetscReal fill=product->fill;
25: PetscBool flg;
28: /* scalable */
29: PetscStrcmp(alg,"scalable",&flg);
30: if (flg) {
31: MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);
32: return(0);
33: }
35: /* nonscalable */
36: PetscStrcmp(alg,"nonscalable",&flg);
37: if (flg) {
38: MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);
39: return(0);
40: }
42: /* seqmpi */
43: PetscStrcmp(alg,"seqmpi",&flg);
44: if (flg) {
45: MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A,B,fill,C);
46: return(0);
47: }
49: #if defined(PETSC_HAVE_HYPRE)
50: PetscStrcmp(alg,"hypre",&flg);
51: if (flg) {
52: MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A,B,fill,C);
53: return(0);
54: }
55: #endif
56: SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_SUP,"Mat Product Algorithm is not supported");
57: }
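/*
   A minimal usage sketch showing how the MatProduct interface reaches the
   symbolic routine above (MatProductSymbolic() performs the dispatch); the
   calls mirror the API used later in this file, and the algorithm string is
   one of those handled above:

     Mat C;
     MatProductCreate(A,B,NULL,&C);
     MatProductSetType(C,MATPRODUCT_AB);
     MatProductSetAlgorithm(C,"scalable");
     MatProductSetFill(C,PETSC_DEFAULT);
     MatProductSetFromOptions(C);
     MatProductSymbolic(C);
     MatProductNumeric(C);
*/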
59: PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(void *data)
60: {
62: Mat_APMPI *ptap = (Mat_APMPI*)data;
65: PetscFree2(ptap->startsj_s,ptap->startsj_r);
66: PetscFree(ptap->bufa);
67: MatDestroy(&ptap->P_loc);
68: MatDestroy(&ptap->P_oth);
69: MatDestroy(&ptap->Pt);
70: PetscFree(ptap->api);
71: PetscFree(ptap->apj);
72: PetscFree(ptap->apa);
73: PetscFree(ptap);
74: return(0);
75: }
77: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,Mat C)
78: {
80: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
81: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
82: Mat_SeqAIJ *cd =(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
83: PetscScalar *cda=cd->a,*coa=co->a;
84: Mat_SeqAIJ *p_loc,*p_oth;
85: PetscScalar *apa,*ca;
86: PetscInt cm =C->rmap->n;
87: Mat_APMPI *ptap;
88: PetscInt *api,*apj,*apJ,i,k;
89: PetscInt cstart=C->cmap->rstart;
90: PetscInt cdnz,conz,k0,k1;
91: MPI_Comm comm;
92: PetscMPIInt size;
95: MatCheckProduct(C,3);
96: ptap = (Mat_APMPI*)C->product->data;
97: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"AP cannot be computed. Missing data");
98: PetscObjectGetComm((PetscObject)A,&comm);
99: MPI_Comm_size(comm,&size);
101: if (!ptap->P_oth && size>1) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"AP cannot be reused. Do not call MatProductClear()");
103: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
104: /*-----------------------------------------------------*/
105: /* update numerical values of P_oth and P_loc */
106: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
107: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
109: /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
110: /*----------------------------------------------------------*/
111: /* get data from symbolic products */
112: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
113: p_oth = NULL;
114: if (size >1) {
115: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
116: }
118: /* get apa for storing dense row A[i,:]*P */
119: apa = ptap->apa;
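/* apa is a dense work array of length P->cmap->N: AProw_nonscalable() accumulates
   A[i,:]*P into it, the nonzeros are gathered into the diagonal and off-diagonal
   blocks of C below, and each used entry is reset to zero so apa is clean for
   the next row. */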
121: api = ptap->api;
122: apj = ptap->apj;
123: for (i=0; i<cm; i++) {
124: /* compute apa = A[i,:]*P */
125: AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
127: /* set values in C */
128: apJ = apj + api[i];
129: cdnz = cd->i[i+1] - cd->i[i];
130: conz = co->i[i+1] - co->i[i];
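/* The sorted pattern apJ is drained in three segments: global columns below
   cstart belong to the first off-diagonal part of C, columns inside the owned
   range to the diagonal block, and the remaining columns to the second
   off-diagonal part, so k sweeps apJ exactly once. */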
132: /* 1st off-diagonal part of C */
133: ca = coa + co->i[i];
134: k = 0;
135: for (k0=0; k0<conz; k0++) {
136: if (apJ[k] >= cstart) break;
137: ca[k0] = apa[apJ[k]];
138: apa[apJ[k++]] = 0.0;
139: }
141: /* diagonal part of C */
142: ca = cda + cd->i[i];
143: for (k1=0; k1<cdnz; k1++) {
144: ca[k1] = apa[apJ[k]];
145: apa[apJ[k++]] = 0.0;
146: }
148: /* 2nd off-diagonal part of C */
149: ca = coa + co->i[i];
150: for (; k0<conz; k0++) {
151: ca[k0] = apa[apJ[k]];
152: apa[apJ[k++]] = 0.0;
153: }
154: }
155: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
156: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
157: return(0);
158: }
160: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,PetscReal fill,Mat C)
161: {
162: PetscErrorCode ierr;
163: MPI_Comm comm;
164: PetscMPIInt size;
165: Mat_APMPI *ptap;
166: PetscFreeSpaceList free_space=NULL,current_space=NULL;
167: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
168: Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
169: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
170: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
171: PetscInt *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi;
172: PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
173: PetscBT lnkbt;
174: PetscReal afill;
175: MatType mtype;
178: MatCheckProduct(C,4);
179: if (C->product->data) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty");
180: PetscObjectGetComm((PetscObject)A,&comm);
181: MPI_Comm_size(comm,&size);
183: /* create struct Mat_APMPI and attach it to C later */
184: PetscNew(&ptap);
186: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
187: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
189: /* get P_loc by taking all local rows of P */
190: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
192: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
193: pi_loc = p_loc->i; pj_loc = p_loc->j;
194: if (size > 1) {
195: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
196: pi_oth = p_oth->i; pj_oth = p_oth->j;
197: } else {
198: p_oth = NULL;
199: pi_oth = NULL; pj_oth = NULL;
200: }
202: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
203: /*-------------------------------------------------------------------*/
204: PetscMalloc1(am+2,&api);
205: ptap->api = api;
206: api[0] = 0;
208: /* create and initialize a linked list */
209: PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);
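/* lnk is a condensed sorted linked list over the pN possible columns; lnk[0]
   holds the current number of entries. Each nonzero A[i,row] contributes the
   column pattern of row "row" of P, and the list is drained into the
   free-space buffer once row i is complete. */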
211: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
212: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);
213: current_space = free_space;
215: MatPreallocateInitialize(comm,am,pn,dnz,onz);
216: for (i=0; i<am; i++) {
217: /* diagonal portion of A */
218: nzi = adi[i+1] - adi[i];
219: for (j=0; j<nzi; j++) {
220: row = *adj++;
221: pnz = pi_loc[row+1] - pi_loc[row];
222: Jptr = pj_loc + pi_loc[row];
223: /* add non-zero cols of P into the sorted linked list lnk */
224: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
225: }
226: /* off-diagonal portion of A */
227: nzi = aoi[i+1] - aoi[i];
228: for (j=0; j<nzi; j++) {
229: row = *aoj++;
230: pnz = pi_oth[row+1] - pi_oth[row];
231: Jptr = pj_oth + pi_oth[row];
232: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
233: }
235: apnz = lnk[0];
236: api[i+1] = api[i] + apnz;
238: /* if free space is not available, double the total space in the list */
239: if (current_space->local_remaining<apnz) {
240: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
241: nspacedouble++;
242: }
244: /* Copy data into free space, then initialize lnk */
245: PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);
246: MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);
248: current_space->array += apnz;
249: current_space->local_used += apnz;
250: current_space->local_remaining -= apnz;
251: }
253: /* Allocate space for apj, initialize apj, and */
254: /* destroy list of free space and other temporary array(s) */
255: PetscMalloc1(api[am]+1,&ptap->apj);
256: apj = ptap->apj;
257: PetscFreeSpaceContiguous(&free_space,ptap->apj);
258: PetscLLDestroy(lnk,lnkbt);
260: /* malloc apa to store dense row A[i,:]*P */
261: PetscCalloc1(pN,&ptap->apa);
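/* This dense accumulator of length pN is what makes the routine "nonscalable":
   each process allocates storage proportional to the global number of columns
   of P, independent of its local share. */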
263: /* set and assemble symbolic parallel matrix C */
264: /*---------------------------------------------*/
265: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
266: MatSetBlockSizesFromMats(C,A,P);
268: MatGetType(A,&mtype);
269: MatSetType(C,mtype);
270: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
271: MatPreallocateFinalize(dnz,onz);
273: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
274: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
275: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
276: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
278: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
279: C->ops->productnumeric = MatProductNumeric_AB;
281: /* attach the supporting struct to C for reuse */
282: C->product->data = ptap;
283: C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
285: /* set MatInfo */
286: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
287: if (afill < 1.0) afill = 1.0;
288: C->info.mallocs = nspacedouble;
289: C->info.fill_ratio_given = fill;
290: C->info.fill_ratio_needed = afill;
292: #if defined(PETSC_USE_INFO)
293: if (api[am]) {
294: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
295: PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
296: } else {
297: PetscInfo(C,"Empty matrix product\n");
298: }
299: #endif
300: return(0);
301: }
303: /* ------------------------------------------------------- */
304: static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat,Mat,PetscReal,Mat);
305: static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat,Mat,Mat);
307: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AB(Mat C)
308: {
309: Mat_Product *product = C->product;
310: Mat A = product->A,B=product->B;
313: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
314: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
316: C->ops->matmultsymbolic = MatMatMultSymbolic_MPIAIJ_MPIDense;
317: C->ops->productsymbolic = MatProductSymbolic_AB;
318: return(0);
319: }
320: /* -------------------------------------------------------------------- */
321: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(Mat C)
322: {
323: Mat_Product *product = C->product;
324: Mat A = product->A,B=product->B;
327: if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend)
328: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);
330: C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIAIJ_MPIDense;
331: C->ops->productsymbolic = MatProductSymbolic_AtB;
332: return(0);
333: }
335: /* --------------------------------------------------------------------- */
336: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense(Mat C)
337: {
339: Mat_Product *product = C->product;
342: switch (product->type) {
343: case MATPRODUCT_AB:
344: MatProductSetFromOptions_MPIAIJ_MPIDense_AB(C);
345: break;
346: case MATPRODUCT_AtB:
347: MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(C);
348: break;
349: default:
350: break;
351: }
352: return(0);
353: }
354: /* ------------------------------------------------------- */
356: typedef struct {
357: Mat workB,workB1;
358: MPI_Request *rwaits,*swaits;
359: PetscInt nsends,nrecvs;
360: MPI_Datatype *stype,*rtype;
361: PetscInt blda;
362: } MPIAIJ_MPIDense;
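/*
   Support structure for C = A*B with A MPIAIJ and B MPIDense: workB (and
   workB1, for a trailing partial block of columns) stores the off-process rows
   of B needed locally, while stype/rtype are MPI derived datatypes that send
   strided rows of B and receive them packed column-wise into workB without
   intermediate pack/unpack buffers.
*/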
364: PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx)
365: {
366: MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*)ctx;
367: PetscErrorCode ierr;
368: PetscInt i;
371: MatDestroy(&contents->workB);
372: MatDestroy(&contents->workB1);
373: for (i=0; i<contents->nsends; i++) {
374: MPI_Type_free(&contents->stype[i]);
375: }
376: for (i=0; i<contents->nrecvs; i++) {
377: MPI_Type_free(&contents->rtype[i]);
378: }
379: PetscFree4(contents->stype,contents->rtype,contents->rwaits,contents->swaits);
380: PetscFree(contents);
381: return(0);
382: }
384: static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat C)
385: {
386: PetscErrorCode ierr;
387: Mat_MPIAIJ *aij=(Mat_MPIAIJ*)A->data;
388: PetscInt nz=aij->B->cmap->n,nsends,nrecvs,i,nrows_to,j,blda,clda;
389: MPIAIJ_MPIDense *contents;
390: VecScatter ctx=aij->Mvctx;
391: PetscInt Am=A->rmap->n,Bm=B->rmap->n,BN=B->cmap->N,Bbn,Bbn1,bs,nrows_from,numBb;
392: MPI_Comm comm;
393: MPI_Datatype type1,*stype,*rtype;
394: const PetscInt *sindices,*sstarts,*rstarts;
395: PetscMPIInt *disp;
396: PetscBool cisdense;
399: MatCheckProduct(C,4);
400: if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
401: PetscObjectGetComm((PetscObject)A,&comm);
402: PetscObjectBaseTypeCompare((PetscObject)C,MATMPIDENSE,&cisdense);
403: if (!cisdense) {
404: MatSetType(C,((PetscObject)B)->type_name);
405: }
406: MatSetSizes(C,Am,B->cmap->n,A->rmap->N,BN);
407: MatSetBlockSizesFromMats(C,A,B);
408: MatSetUp(C);
409: MatDenseGetLDA(B,&blda);
410: MatDenseGetLDA(C,&clda);
411: PetscNew(&contents);
413: VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,NULL,NULL);
414: VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,NULL,NULL);
416: /* Create column block of B and C for memory scalability when BN is too large */
417: /* Estimate Bbn, column size of Bb */
418: if (nz) {
419: Bbn1 = 2*Am*BN/nz;
420: } else Bbn1 = BN;
422: bs = PetscAbs(B->cmap->bs);
423: Bbn1 = (Bbn1/bs)*bs; /* Bbn1 is a multiple of bs */
424: if (Bbn1 > BN) Bbn1 = BN;
425: MPI_Allreduce(&Bbn1,&Bbn,1,MPIU_INT,MPI_MAX,comm);
427: /* Enable runtime option for Bbn */
428: PetscOptionsBegin(comm,((PetscObject)C)->prefix,"MatMatMult","Mat");
429: PetscOptionsInt("-matmatmult_Bbn","Number of columns in Bb","MatMatMult",Bbn,&Bbn,NULL);
430: PetscOptionsEnd();
431: Bbn = PetscMin(Bbn,BN);
433: if (Bbn > 0 && Bbn < BN) {
434: numBb = BN/Bbn;
435: Bbn1 = BN - numBb*Bbn;
436: } else numBb = 0;
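/* Bbn results from the heuristic nz*Bbn ~ 2*Am*BN (keeping the nz x Bbn work
   matrix comparable to the local share of the product), rounded down to a
   multiple of the block size, overridable with -matmatmult_Bbn, and capped at
   BN. When 0 < Bbn < BN, BN/Bbn full column blocks are processed and the last
   Bbn1 = BN - numBb*Bbn columns form a smaller trailing block (workB1). */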
438: if (numBb) {
439: PetscInfo3(C,"use Bb, BN=%D, Bbn=%D; numBb=%D\n",BN,Bbn,numBb);
440: if (Bbn1) { /* Create workB1 for the remaining columns */
441: PetscInfo2(C,"use Bb1, BN=%D, Bbn1=%D\n",BN,Bbn1);
442: /* Create work matrix used to store off processor rows of B needed for local product */
443: MatCreateSeqDense(PETSC_COMM_SELF,nz,Bbn1,NULL,&contents->workB1);
444: } else contents->workB1 = NULL;
445: }
447: /* Create work matrix used to store off processor rows of B needed for local product */
448: MatCreateSeqDense(PETSC_COMM_SELF,nz,Bbn,NULL,&contents->workB);
450: /* Use MPI derived data type to reduce memory required by the send/recv buffers */
451: PetscMalloc4(nsends,&stype,nrecvs,&rtype,nrecvs,&contents->rwaits,nsends,&contents->swaits);
452: contents->stype = stype;
453: contents->nsends = nsends;
455: contents->rtype = rtype;
456: contents->nrecvs = nrecvs;
457: contents->blda = blda;
459: PetscMalloc1(Bm+1,&disp);
460: for (i=0; i<nsends; i++) {
461: nrows_to = sstarts[i+1]-sstarts[i];
462: for (j=0; j<nrows_to; j++){
463: disp[j] = sindices[sstarts[i]+j]; /* rowB to be sent */
464: }
465: MPI_Type_create_indexed_block(nrows_to,1,(const PetscMPIInt *)disp,MPIU_SCALAR,&type1);
467: MPI_Type_create_resized(type1,0,blda*sizeof(PetscScalar),&stype[i]);
468: MPI_Type_commit(&stype[i]);
469: MPI_Type_free(&type1);
470: }
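/* Each send type picks out the rows of B destined for one process: an indexed
   block over those row indices, resized to an extent of blda*sizeof(PetscScalar)
   so that sending ncols copies of the type steps column by column through B's
   column-major storage. */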
472: for (i=0; i<nrecvs; i++) {
473: /* received values from a process form a (nrows_from x Bbn) row block in workB (column-wise) */
474: nrows_from = rstarts[i+1]-rstarts[i];
475: disp[0] = 0;
476: MPI_Type_create_indexed_block(1, nrows_from, (const PetscMPIInt *)disp, MPIU_SCALAR, &type1);
477: MPI_Type_create_resized(type1, 0, nz*sizeof(PetscScalar), &rtype[i]);
478: MPI_Type_commit(&rtype[i]);
479: MPI_Type_free(&type1);
480: }
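/* Each receive type is one contiguous run of nrows_from rows, resized to an
   extent of nz*sizeof(PetscScalar) (one column of workB), so receiving ncols
   copies deposits the data column-wise into the correct row block of workB. */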
482: PetscFree(disp);
483: VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,NULL,NULL);
484: VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,NULL,NULL);
485: MatSetOption(C,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
486: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
487: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
488: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
490: C->product->data = contents;
491: C->product->destroy = MatMPIAIJ_MPIDenseDestroy;
492: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
493: return(0);
494: }
496: PETSC_INTERN PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat,const PetscBool);
497: /*
498: Performs an efficient scatter on the rows of B needed by this process; this is
499: a modification of the VecScatterBegin_() routines.
501: Input: Bbidx = 0: B = Bb
502: = 1: B = Bb1, see MatMatMultSymbolic_MPIAIJ_MPIDense()
503: */
504: PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,PetscInt Bbidx,Mat C,Mat *outworkB)
505: {
506: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
507: PetscErrorCode ierr;
508: const PetscScalar *b;
509: PetscScalar *rvalues;
510: VecScatter ctx = aij->Mvctx;
511: const PetscInt *sindices,*sstarts,*rstarts;
512: const PetscMPIInt *sprocs,*rprocs;
513: PetscInt i,nsends,nrecvs;
514: MPI_Request *swaits,*rwaits;
515: MPI_Comm comm;
516: PetscMPIInt tag=((PetscObject)ctx)->tag,ncols=B->cmap->N,nrows=aij->B->cmap->n,nsends_mpi,nrecvs_mpi;
517: MPIAIJ_MPIDense *contents;
518: Mat workB;
519: MPI_Datatype *stype,*rtype;
520: PetscInt blda;
523: MatCheckProduct(C,4);
524: if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
525: contents = (MPIAIJ_MPIDense*)C->product->data;
526: VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,&sprocs,NULL/*bs*/);
527: VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,&rprocs,NULL/*bs*/);
528: PetscMPIIntCast(nsends,&nsends_mpi);
529: PetscMPIIntCast(nrecvs,&nrecvs_mpi);
530: if (Bbidx == 0) {
531: workB = *outworkB = contents->workB;
532: } else {
533: workB = *outworkB = contents->workB1;
534: }
535: if (nrows != workB->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
536: swaits = contents->swaits;
537: rwaits = contents->rwaits;
539: MatDenseGetArrayRead(B,&b);
540: MatDenseGetLDA(B,&blda);
541: if (blda != contents->blda) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Cannot reuse an input matrix with lda %D != %D",blda,contents->blda);
542: MatDenseGetArray(workB,&rvalues);
544: /* Post recv, use MPI derived data type to save memory */
545: PetscObjectGetComm((PetscObject)C,&comm);
546: rtype = contents->rtype;
547: for (i=0; i<nrecvs; i++) {
548: MPI_Irecv(rvalues+(rstarts[i]-rstarts[0]),ncols,rtype[i],rprocs[i],tag,comm,rwaits+i);
549: }
551: stype = contents->stype;
552: for (i=0; i<nsends; i++) {
553: MPI_Isend(b,ncols,stype[i],sprocs[i],tag,comm,swaits+i);
554: }
556: if (nrecvs) {MPI_Waitall(nrecvs_mpi,rwaits,MPI_STATUSES_IGNORE);}
557: if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
559: VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,&sprocs,NULL);
560: VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,&rprocs,NULL);
561: MatDenseRestoreArrayRead(B,&b);
562: MatDenseRestoreArray(workB,&rvalues);
563: return(0);
564: }
566: static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
567: {
568: PetscErrorCode ierr;
569: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
570: Mat_MPIDense *bdense = (Mat_MPIDense*)B->data;
571: Mat_MPIDense *cdense = (Mat_MPIDense*)C->data;
572: Mat workB;
573: MPIAIJ_MPIDense *contents;
576: MatCheckProduct(C,3);
577: if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
578: contents = (MPIAIJ_MPIDense*)C->product->data;
579: /* diagonal block of A times all local rows of B */
580: /* TODO: this calls a symbolic multiplication every time, which could be avoided */
581: MatMatMult(aij->A,bdense->A,MAT_REUSE_MATRIX,PETSC_DEFAULT,&cdense->A);
582: if (contents->workB->cmap->n == B->cmap->N) {
583: /* get off processor parts of B needed to complete C=A*B */
584: MatMPIDenseScatter(A,B,0,C,&workB);
586: /* off-diagonal block of A times nonlocal rows of B */
587: MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A,PETSC_TRUE);
588: } else {
589: Mat Bb,Cb;
590: PetscInt BN=B->cmap->N,n=contents->workB->cmap->n,i;
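/* BN exceeds the chosen block width: march over B and C in column blocks of
   width n, reusing workB for full blocks and workB1 (selected by Bbidx =
   i+n>BN) for the trailing partial block. */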
592: for (i=0; i<BN; i+=n) {
593: MatDenseGetSubMatrix(B,i,PetscMin(i+n,BN),&Bb);
594: MatDenseGetSubMatrix(C,i,PetscMin(i+n,BN),&Cb);
596: /* get off processor parts of B needed to complete C=A*B */
597: MatMPIDenseScatter(A,Bb,i+n>BN,C,&workB);
599: /* off-diagonal block of A times nonlocal rows of B */
600: cdense = (Mat_MPIDense*)Cb->data;
601: MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A,PETSC_TRUE);
603: MatDenseRestoreSubMatrix(B,&Bb);
604: MatDenseRestoreSubMatrix(C,&Cb);
605: }
606: }
607: return(0);
608: }
610: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
611: {
613: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
614: Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
615: Mat_SeqAIJ *cd = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
616: PetscInt *adi = ad->i,*adj,*aoi=ao->i,*aoj;
617: PetscScalar *ada,*aoa,*cda=cd->a,*coa=co->a;
618: Mat_SeqAIJ *p_loc,*p_oth;
619: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
620: PetscScalar *pa_loc,*pa_oth,*pa,valtmp,*ca;
621: PetscInt cm = C->rmap->n,anz,pnz;
622: Mat_APMPI *ptap;
623: PetscScalar *apa_sparse;
624: PetscInt *api,*apj,*apJ,i,j,k,row;
625: PetscInt cstart = C->cmap->rstart;
626: PetscInt cdnz,conz,k0,k1,nextp;
627: MPI_Comm comm;
628: PetscMPIInt size;
631: MatCheckProduct(C,3);
632: ptap = (Mat_APMPI*)C->product->data;
633: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"AP cannot be computed. Missing data");
634: PetscObjectGetComm((PetscObject)C,&comm);
635: MPI_Comm_size(comm,&size);
636: if (!ptap->P_oth && size>1) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"AP cannot be reused. Do not call MatProductClear()");
638: apa_sparse = ptap->apa;
640: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
641: /*-----------------------------------------------------*/
642: /* update numerical values of P_oth and P_loc */
643: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
644: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
646: /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
647: /*----------------------------------------------------------*/
648: /* get data from symbolic products */
649: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
650: pi_loc = p_loc->i; pj_loc = p_loc->j; pa_loc = p_loc->a;
651: if (size >1) {
652: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
653: pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a;
654: } else {
655: p_oth = NULL; pi_oth = NULL; pj_oth = NULL; pa_oth = NULL;
656: }
658: api = ptap->api;
659: apj = ptap->apj;
660: for (i=0; i<cm; i++) {
661: apJ = apj + api[i];
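/* apJ points at the merged, sorted column pattern of row i computed in the
   symbolic phase; the sparse axpys below accumulate into apa_sparse[k] in that
   same order, so the cursor k and the P-row cursor nextp advance together. */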
663: /* diagonal portion of A */
664: anz = adi[i+1] - adi[i];
665: adj = ad->j + adi[i];
666: ada = ad->a + adi[i];
667: for (j=0; j<anz; j++) {
668: row = adj[j];
669: pnz = pi_loc[row+1] - pi_loc[row];
670: pj = pj_loc + pi_loc[row];
671: pa = pa_loc + pi_loc[row];
672: /* perform sparse axpy */
673: valtmp = ada[j];
674: nextp = 0;
675: for (k=0; nextp<pnz; k++) {
676: if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
677: apa_sparse[k] += valtmp*pa[nextp++];
678: }
679: }
680: PetscLogFlops(2.0*pnz);
681: }
683: /* off-diagonal portion of A */
684: anz = aoi[i+1] - aoi[i];
685: aoj = ao->j + aoi[i];
686: aoa = ao->a + aoi[i];
687: for (j=0; j<anz; j++) {
688: row = aoj[j];
689: pnz = pi_oth[row+1] - pi_oth[row];
690: pj = pj_oth + pi_oth[row];
691: pa = pa_oth + pi_oth[row];
692: /* perform sparse axpy */
693: valtmp = aoa[j];
694: nextp = 0;
695: for (k=0; nextp<pnz; k++) {
696: if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
697: apa_sparse[k] += valtmp*pa[nextp++];
698: }
699: }
700: PetscLogFlops(2.0*pnz);
701: }
703: /* set values in C */
704: cdnz = cd->i[i+1] - cd->i[i];
705: conz = co->i[i+1] - co->i[i];
707: /* 1st off-diagonal part of C */
708: ca = coa + co->i[i];
709: k = 0;
710: for (k0=0; k0<conz; k0++) {
711: if (apJ[k] >= cstart) break;
712: ca[k0] = apa_sparse[k];
713: apa_sparse[k] = 0.0;
714: k++;
715: }
717: /* diagonal part of C */
718: ca = cda + cd->i[i];
719: for (k1=0; k1<cdnz; k1++) {
720: ca[k1] = apa_sparse[k];
721: apa_sparse[k] = 0.0;
722: k++;
723: }
725: /* 2nd off-diagonal part of C */
726: ca = coa + co->i[i];
727: for (; k0<conz; k0++) {
728: ca[k0] = apa_sparse[k];
729: apa_sparse[k] = 0.0;
730: k++;
731: }
732: }
733: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
734: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
735: return(0);
736: }
738: /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid the O(pN) dense-row memory requirement */
739: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat C)
740: {
741: PetscErrorCode ierr;
742: MPI_Comm comm;
743: PetscMPIInt size;
744: Mat_APMPI *ptap;
745: PetscFreeSpaceList free_space = NULL,current_space=NULL;
746: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
747: Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
748: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
749: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
750: PetscInt i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max=0;
751: PetscInt am=A->rmap->n,pn=P->cmap->n,pm=P->rmap->n,lsize=pn+20;
752: PetscReal afill;
753: MatType mtype;
756: MatCheckProduct(C,4);
757: if (C->product->data) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty");
758: PetscObjectGetComm((PetscObject)A,&comm);
759: MPI_Comm_size(comm,&size);
761: /* create struct Mat_APMPI and attach it to C later */
762: PetscNew(&ptap);
764: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
765: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
767: /* get P_loc by taking all local rows of P */
768: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
770: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
771: pi_loc = p_loc->i; pj_loc = p_loc->j;
772: if (size > 1) {
773: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
774: pi_oth = p_oth->i; pj_oth = p_oth->j;
775: } else {
776: p_oth = NULL;
777: pi_oth = NULL; pj_oth = NULL;
778: }
780: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
781: /*-------------------------------------------------------------------*/
782: PetscMalloc1(am+2,&api);
783: ptap->api = api;
784: api[0] = 0;
786: PetscLLCondensedCreate_Scalable(lsize,&lnk);
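/* The scalable variant starts the condensed list at pn+20 entries and grows it
   on demand with PetscLLCondensedExpand_Scalable(), instead of allocating the
   O(pN) list used by the nonscalable routine; *lnk holds the current count. */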
788: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
789: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);
790: current_space = free_space;
791: MatPreallocateInitialize(comm,am,pn,dnz,onz);
792: for (i=0; i<am; i++) {
793: /* diagonal portion of A */
794: nzi = adi[i+1] - adi[i];
795: for (j=0; j<nzi; j++) {
796: row = *adj++;
797: pnz = pi_loc[row+1] - pi_loc[row];
798: Jptr = pj_loc + pi_loc[row];
799: /* Expand list if it is not long enough */
800: if (pnz+apnz_max > lsize) {
801: lsize = pnz+apnz_max;
802: PetscLLCondensedExpand_Scalable(lsize, &lnk);
803: }
804: /* add non-zero cols of P into the sorted linked list lnk */
805: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
806: apnz = *lnk; /* The first element in the list is the number of items in the list */
807: api[i+1] = api[i] + apnz;
808: if (apnz > apnz_max) apnz_max = apnz;
809: }
810: /* off-diagonal portion of A */
811: nzi = aoi[i+1] - aoi[i];
812: for (j=0; j<nzi; j++) {
813: row = *aoj++;
814: pnz = pi_oth[row+1] - pi_oth[row];
815: Jptr = pj_oth + pi_oth[row];
816: /* Expand list if it is not long enough */
817: if (pnz+apnz_max > lsize) {
818: lsize = pnz + apnz_max;
819: PetscLLCondensedExpand_Scalable(lsize, &lnk);
820: }
821: /* add non-zero cols of P into the sorted linked list lnk */
822: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
823: apnz = *lnk; /* The first element in the list is the number of items in the list */
824: api[i+1] = api[i] + apnz;
825: if (apnz > apnz_max) apnz_max = apnz;
826: }
827: apnz = *lnk;
828: api[i+1] = api[i] + apnz;
829: if (apnz > apnz_max) apnz_max = apnz;
831: /* if free space is not available, double the total space in the list */
832: if (current_space->local_remaining<apnz) {
833: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
834: nspacedouble++;
835: }
837: /* Copy data into free space, then initialize lnk */
838: PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);
839: MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);
841: current_space->array += apnz;
842: current_space->local_used += apnz;
843: current_space->local_remaining -= apnz;
844: }
846: /* Allocate space for apj, initialize apj, and */
847: /* destroy list of free space and other temporary array(s) */
848: PetscMalloc1(api[am]+1,&ptap->apj);
849: apj = ptap->apj;
850: PetscFreeSpaceContiguous(&free_space,ptap->apj);
851: PetscLLCondensedDestroy_Scalable(lnk);
853: /* create and assemble symbolic parallel matrix C */
854: /*----------------------------------------------------*/
855: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
856: MatSetBlockSizesFromMats(C,A,P);
857: MatGetType(A,&mtype);
858: MatSetType(C,mtype);
859: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
860: MatPreallocateFinalize(dnz,onz);
862: /* malloc apa for assembling C */
863: PetscCalloc1(apnz_max,&ptap->apa);
865: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
866: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
867: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
868: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
870: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
871: C->ops->productnumeric = MatProductNumeric_AB;
873: /* attach the supporting struct to C for reuse */
874: C->product->data = ptap;
875: C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
877: /* set MatInfo */
878: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
879: if (afill < 1.0) afill = 1.0;
880: C->info.mallocs = nspacedouble;
881: C->info.fill_ratio_given = fill;
882: C->info.fill_ratio_needed = afill;
884: #if defined(PETSC_USE_INFO)
885: if (api[am]) {
886: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
887: PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
888: } else {
889: PetscInfo(C,"Empty matrix product\n");
890: }
891: #endif
892: return(0);
893: }
895: /* This function is needed for the seqMPI matrix-matrix multiplication. */
896: /* Three sorted input arrays are merged into one sorted output array. */
897: /* The output size is returned in *size4; duplicate entries appear only once. */
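/* Example: in1 = {1,4}, in2 = {2,4}, in3 = {4,7} merge to out = {1,2,4,7} */
/* and *size4 = 4; the value 4, present in all three inputs, appears once. */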
898: static void Merge3SortedArrays(PetscInt size1, PetscInt *in1,
899: PetscInt size2, PetscInt *in2,
900: PetscInt size3, PetscInt *in3,
901: PetscInt *size4, PetscInt *out)
902: {
903: PetscInt i = 0, j = 0, k = 0, l = 0;
905: /* Traverse all three arrays */
906: while (i<size1 && j<size2 && k<size3) {
907: if (in1[i] < in2[j] && in1[i] < in3[k]) {
908: out[l++] = in1[i++];
909: }
910: else if (in2[j] < in1[i] && in2[j] < in3[k]) {
911: out[l++] = in2[j++];
912: }
913: else if (in3[k] < in1[i] && in3[k] < in2[j]) {
914: out[l++] = in3[k++];
915: }
916: else if (in1[i] == in2[j] && in1[i] < in3[k]) {
917: out[l++] = in1[i];
918: i++, j++;
919: }
920: else if (in1[i] == in3[k] && in1[i] < in2[j]) {
921: out[l++] = in1[i];
922: i++, k++;
923: }
924: else if (in3[k] == in2[j] && in2[j] < in1[i]) {
925: out[l++] = in2[j];
926: k++, j++;
927: }
928: else if (in1[i] == in2[j] && in1[i] == in3[k]) {
929: out[l++] = in1[i];
930: i++, j++, k++;
931: }
932: }
934: /* Traverse two remaining arrays */
935: while (i<size1 && j<size2) {
936: if (in1[i] < in2[j]) {
937: out[l++] = in1[i++];
938: }
939: else if (in1[i] > in2[j]) {
940: out[l++] = in2[j++];
941: }
942: else {
943: out[l++] = in1[i];
944: i++, j++;
945: }
946: }
948: while (i<size1 && k<size3) {
949: if (in1[i] < in3[k]) {
950: out[l++] = in1[i++];
951: }
952: else if (in1[i] > in3[k]) {
953: out[l++] = in3[k++];
954: }
955: else {
956: out[l++] = in1[i];
957: i++, k++;
958: }
959: }
961: while (k<size3 && j<size2) {
962: if (in3[k] < in2[j]) {
963: out[l++] = in3[k++];
964: }
965: else if (in3[k] > in2[j]) {
966: out[l++] = in2[j++];
967: }
968: else {
969: out[l++] = in3[k];
970: k++, j++;
971: }
972: }
974: /* Traverse one remaining array */
975: while (i<size1) out[l++] = in1[i++];
976: while (j<size2) out[l++] = in2[j++];
977: while (k<size3) out[l++] = in3[k++];
979: *size4 = l;
980: }
982: /* This matrix-matrix multiplication algorithm divides the multiplication into three multiplications and */
983: /* adds up the products. Two of these three multiplications are performed with existing (sequential) */
984: /* matrix-matrix multiplications. */
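/* Concretely, AP = A_diag*P_loc_diag + A_diag*P_loc_off + A_off*P_oth: the */
/* first and last terms are computed with sequential symbolic products,    */
/* only A_diag*P_loc_off is built with the linked list here, and the three */
/* sorted j-arrays are merged row by row with Merge3SortedArrays().        */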
985: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat C)
986: {
987: PetscErrorCode ierr;
988: MPI_Comm comm;
989: PetscMPIInt size;
990: Mat_APMPI *ptap;
991: PetscFreeSpaceList free_space_diag=NULL, current_space=NULL;
992: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data;
993: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc;
994: Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data;
995: Mat_SeqAIJ *adpd_seq, *p_off, *aopoth_seq;
996: PetscInt adponz, adpdnz;
997: PetscInt *pi_loc,*dnz,*onz;
998: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,rstart=A->rmap->rstart;
999: PetscInt *lnk,i, i1=0,pnz,row,*adpoi,*adpoj, *api, *adpoJ, *aopJ, *apJ,*Jptr, aopnz, nspacedouble=0,j,nzi,
1000: *apj,apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj;
1001: PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n, p_colstart, p_colend;
1002: PetscBT lnkbt;
1003: PetscReal afill;
1004: PetscMPIInt rank;
1005: Mat adpd, aopoth;
1006: MatType mtype;
1007: const char *prefix;
1010: MatCheckProduct(C,4);
1011: if (C->product->data) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty");
1012: PetscObjectGetComm((PetscObject)A,&comm);
1013: MPI_Comm_size(comm,&size);
1014: MPI_Comm_rank(comm, &rank);
1015: MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend);
1017: /* create struct Mat_APMPI and attach it to C later */
1018: PetscNew(&ptap);
1020: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
1021: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
1023: /* get P_loc by taking all local rows of P */
1024: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
1027: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1028: pi_loc = p_loc->i;
1030: /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */
1031: PetscMalloc1(am+2,&api);
1032: PetscMalloc1(am+2,&adpoi);
1034: adpoi[0] = 0;
1035: ptap->api = api;
1036: api[0] = 0;
1038: /* create and initialize a linked list, will be used for both A_diag * P_loc_off and A_offd * P_oth */
1039: PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);
1040: MatPreallocateInitialize(comm,am,pn,dnz,onz);
1042: /* Symbolic calc of A_loc_diag * P_loc_diag */
1043: MatGetOptionsPrefix(A,&prefix);
1044: MatProductCreate(a->A,p->A,NULL,&adpd);
1046: MatSetOptionsPrefix(adpd,prefix);
1047: MatAppendOptionsPrefix(adpd,"inner_diag_");
1049: MatProductSetType(adpd,MATPRODUCT_AB);
1050: MatProductSetAlgorithm(adpd,"sorted");
1051: MatProductSetFill(adpd,fill);
1052: MatProductSetFromOptions(adpd);
1053: MatProductSymbolic(adpd);
1055: adpd_seq = (Mat_SeqAIJ*)((adpd)->data);
1056: adpdi = adpd_seq->i; adpdj = adpd_seq->j;
1057: p_off = (Mat_SeqAIJ*)((p->B)->data);
1058: poff_i = p_off->i; poff_j = p_off->j;
1060: /* j_temp stores indices of a result row before they are added to the linked list */
1061: PetscMalloc1(pN+2,&j_temp);
1064: /* Symbolic calc of the A_diag * p_loc_off */
1065: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
1066: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space_diag);
1067: current_space = free_space_diag;
1069: for (i=0; i<am; i++) {
1070: /* A_diag * P_loc_off */
1071: nzi = adi[i+1] - adi[i];
1072: for (j=0; j<nzi; j++) {
1073: row = *adj++;
1074: pnz = poff_i[row+1] - poff_i[row];
1075: Jptr = poff_j + poff_i[row];
1076: for (i1 = 0; i1 < pnz; i1++) {
1077: j_temp[i1] = p->garray[Jptr[i1]];
1078: }
1079: /* add non-zero cols of P into the sorted linked list lnk */
1080: PetscLLCondensedAddSorted(pnz,j_temp,lnk,lnkbt);
1081: }
1083: adponz = lnk[0];
1084: adpoi[i+1] = adpoi[i] + adponz;
1086: /* if free space is not available, double the total space in the list */
1087: if (current_space->local_remaining<adponz) {
1088: PetscFreeSpaceGet(PetscIntSumTruncate(adponz,current_space->total_array_size),&current_space);
1089: nspacedouble++;
1090: }
1092: /* Copy data into free space, then initialize lnk */
1093: PetscLLCondensedClean(pN,adponz,current_space->array,lnk,lnkbt);
1095: current_space->array += adponz;
1096: current_space->local_used += adponz;
1097: current_space->local_remaining -= adponz;
1098: }
1100: /* Symbolic calc of A_off * P_oth */
1101: MatSetOptionsPrefix(a->B,prefix);
1102: MatAppendOptionsPrefix(a->B,"inner_offdiag_");
1103: MatCreate(PETSC_COMM_SELF,&aopoth);
1104: MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, aopoth);
1105: aopoth_seq = (Mat_SeqAIJ*)((aopoth)->data);
1106: aopothi = aopoth_seq->i; aopothj = aopoth_seq->j;
1108: /* Allocate space for apj, adpj, aopj, ... */
1109: /* destroy lists of free space and other temporary array(s) */
1111: PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am]+2, &ptap->apj);
1112: PetscMalloc1(adpoi[am]+2, &adpoj);
1114: /* Copy from linked list to j-array */
1115: PetscFreeSpaceContiguous(&free_space_diag,adpoj);
1116: PetscLLDestroy(lnk,lnkbt);
1118: adpoJ = adpoj;
1119: adpdJ = adpdj;
1120: aopJ = aopothj;
1121: apj = ptap->apj;
1122: apJ = apj; /* still empty */
1124: /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */
1125: /* A_diag * P_loc_diag to get A*P */
1126: for (i = 0; i < am; i++) {
1127: aopnz = aopothi[i+1] - aopothi[i];
1128: adponz = adpoi[i+1] - adpoi[i];
1129: adpdnz = adpdi[i+1] - adpdi[i];
1131: /* Correct indices from A_diag*P_diag */
1132: for (i1 = 0; i1 < adpdnz; i1++) {
1133: adpdJ[i1] += p_colstart;
1134: }
1135: /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */
1136: Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ);
1137: MatPreallocateSet(i+rstart, apnz, apJ, dnz, onz);
1139: aopJ += aopnz;
1140: adpoJ += adponz;
1141: adpdJ += adpdnz;
1142: apJ += apnz;
1143: api[i+1] = api[i] + apnz;
1144: }
1146: /* malloc apa to store dense row A[i,:]*P */
1147: PetscCalloc1(pN+2,&ptap->apa);
1149: /* create and assemble symbolic parallel matrix C */
1150: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
1151: MatSetBlockSizesFromMats(C,A,P);
1152: MatGetType(A,&mtype);
1153: MatSetType(C,mtype);
1154: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
1155: MatPreallocateFinalize(dnz,onz);
1157: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
1158: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1159: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1160: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
1162: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1163: C->ops->productnumeric = MatProductNumeric_AB;
1165: /* attach the supporting struct to C for reuse */
1166: C->product->data = ptap;
1167: C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
1169: /* set MatInfo */
1170: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
1171: if (afill < 1.0) afill = 1.0;
1172: C->info.mallocs = nspacedouble;
1173: C->info.fill_ratio_given = fill;
1174: C->info.fill_ratio_needed = afill;
1176: #if defined(PETSC_USE_INFO)
1177: if (api[am]) {
1178: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
1179: PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
1180: } else {
1181: PetscInfo(C,"Empty matrix product\n");
1182: }
1183: #endif
1185: MatDestroy(&aopoth);
1186: MatDestroy(&adpd);
1187: PetscFree(j_temp);
1188: PetscFree(adpoj);
1189: PetscFree(adpoi);
1190: return(0);
1191: }
1193: /*-------------------------------------------------------------------------*/
1194: /* This routine only works when scall=MAT_REUSE_MATRIX! */
1195: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P,Mat A,Mat C)
1196: {
1198: Mat_APMPI *ptap;
1199: Mat Pt;
1202: MatCheckProduct(C,3);
1203: ptap = (Mat_APMPI*)C->product->data;
1204: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be computed. Missing data");
1205: if (!ptap->Pt) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatProductClear()");
1207: Pt = ptap->Pt;
1208: MatTranspose(P,MAT_REUSE_MATRIX,&Pt);
1209: MatMatMultNumeric_MPIAIJ_MPIAIJ(Pt,A,C);
1210: return(0);
1211: }
1213: /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
1214: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,PetscReal fill,Mat C)
1215: {
1216: PetscErrorCode ierr;
1217: Mat_APMPI *ptap;
1218: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data;
1219: MPI_Comm comm;
1220: PetscMPIInt size,rank;
1221: PetscFreeSpaceList free_space=NULL,current_space=NULL;
1222: PetscInt pn=P->cmap->n,aN=A->cmap->N,an=A->cmap->n;
1223: PetscInt *lnk,i,k,nsend,rstart;
1224: PetscBT lnkbt;
1225: PetscMPIInt tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
1226: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
1227: PetscInt len,proc,*dnz,*onz,*owners,nzi;
1228: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1229: MPI_Request *swaits,*rwaits;
1230: MPI_Status *sstatus,rstatus;
1231: PetscLayout rowmap;
1232: PetscInt *owners_co,*coi,*coj; /* i and j arrays of (p->B)^T*A - used in the communication */
1233: PetscMPIInt *len_r,*id_r; /* arrays of length comm->size; store send/recv matrix values */
1234: PetscInt *Jptr,*prmap=p->garray,con,j,Crmax;
1235: Mat_SeqAIJ *a_loc,*c_loc,*c_oth;
1236: PetscTable ta;
1237: MatType mtype;
1238: const char *prefix;
1241: PetscObjectGetComm((PetscObject)A,&comm);
1242: MPI_Comm_size(comm,&size);
1243: MPI_Comm_rank(comm,&rank);
1245: /* create symbolic parallel matrix C */
1246: MatGetType(A,&mtype);
1247: MatSetType(C,mtype);
1249: C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1251: /* create struct Mat_APMPI and attach it to C later */
1252: PetscNew(&ptap);
1253: ptap->reuse = MAT_INITIAL_MATRIX;
1255: /* (0) compute Rd = Pd^T, Ro = Po^T */
1256: /* --------------------------------- */
1257: MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);
1258: MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);
1260: /* (1) compute symbolic A_loc */
1261: /* ---------------------------*/
1262: MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&ptap->A_loc);
1264: /* (2-1) compute symbolic C_oth = Ro*A_loc */
1265: /* ------------------------------------ */
1266: MatGetOptionsPrefix(A,&prefix);
1267: MatSetOptionsPrefix(ptap->Ro,prefix);
1268: MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");
1269: MatCreate(PETSC_COMM_SELF,&ptap->C_oth);
1270: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->A_loc,fill,ptap->C_oth);
1272: /* (3) send coj of C_oth to other processors */
1273: /* ------------------------------------------ */
1274: /* determine row ownership */
1275: PetscLayoutCreate(comm,&rowmap);
1276: rowmap->n = pn;
1277: rowmap->bs = 1;
1278: PetscLayoutSetUp(rowmap);
1279: owners = rowmap->range;
1281: /* determine the number of messages to send, their lengths */
1282: PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);
1283: PetscArrayzero(len_s,size);
1284: PetscArrayzero(len_si,size);
1286: c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
1287: coi = c_oth->i; coj = c_oth->j;
1288: con = ptap->C_oth->rmap->n;
1289: proc = 0;
1290: for (i=0; i<con; i++) {
1291: while (prmap[i] >= owners[proc+1]) proc++;
1292: len_si[proc]++; /* num of rows in Co(=Pt*A) to be sent to [proc] */
1293: len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
1294: }
1296: len = 0; /* max length of buf_si[], see (4) */
1297: owners_co[0] = 0;
1298: nsend = 0;
1299: for (proc=0; proc<size; proc++) {
1300: owners_co[proc+1] = owners_co[proc] + len_si[proc];
1301: if (len_s[proc]) {
1302: nsend++;
1303: len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
1304: len += len_si[proc];
1305: }
1306: }
1308: /* determine the number and length of messages to receive for coi and coj */
1309: PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);
1310: PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);
1312: /* post the Irecv and Isend of coj */
1313: PetscCommGetNewTag(comm,&tagj);
1314: PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);
1315: PetscMalloc1(nsend+1,&swaits);
1316: for (proc=0, k=0; proc<size; proc++) {
1317: if (!len_s[proc]) continue;
1318: i = owners_co[proc];
1319: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
1320: k++;
1321: }
1323: /* (2-2) compute symbolic C_loc = Rd*A_loc */
1324: /* ---------------------------------------- */
1325: MatSetOptionsPrefix(ptap->Rd,prefix);
1326: MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");
1327: MatCreate(PETSC_COMM_SELF,&ptap->C_loc);
1328: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->A_loc,fill,ptap->C_loc);
1329: c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
1331: /* receives of coj are complete */
1332: for (i=0; i<nrecv; i++) {
1333: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
1334: }
1335: PetscFree(rwaits);
1336: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
1338: /* add received column indices into ta to update Crmax */
1339: a_loc = (Mat_SeqAIJ*)(ptap->A_loc)->data;
1341: /* create and initialize a linked list */
1342: PetscTableCreate(an,aN,&ta); /* for computing Crmax */
1343: MatRowMergeMax_SeqAIJ(a_loc,ptap->A_loc->rmap->N,ta);
1345: for (k=0; k<nrecv; k++) {/* k-th received message */
1346: Jptr = buf_rj[k];
1347: for (j=0; j<len_r[k]; j++) {
1348: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
1349: }
1350: }
1351: PetscTableGetCount(ta,&Crmax);
1352: PetscTableDestroy(&ta);
1354: /* (4) send and recv coi */
1355: /*-----------------------*/
1356: PetscCommGetNewTag(comm,&tagi);
1357: PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);
1358: PetscMalloc1(len+1,&buf_s);
1359: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1360: for (proc=0,k=0; proc<size; proc++) {
1361: if (!len_s[proc]) continue;
1362: /* form outgoing message for i-structure:
1363: buf_si[0]: nrows to be sent
1364: [1:nrows]: row index (global)
1365: [nrows+1:2*nrows+1]: i-structure index
1366: */
1367: /*-------------------------------------------*/
1368: nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
1369: buf_si_i = buf_si + nrows+1;
1370: buf_si[0] = nrows;
1371: buf_si_i[0] = 0;
1372: nrows = 0;
1373: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1374: nzi = coi[i+1] - coi[i];
1375: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
1376: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
1377: nrows++;
1378: }
1379: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
1380: k++;
1381: buf_si += len_si[proc];
1382: }
1383: for (i=0; i<nrecv; i++) {
1384: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
1385: }
1386: PetscFree(rwaits);
1387: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
1389: PetscFree4(len_s,len_si,sstatus,owners_co);
1390: PetscFree(len_ri);
1391: PetscFree(swaits);
1392: PetscFree(buf_s);
1394: /* (5) compute the local portion of C */
1395: /* ------------------------------------------ */
1396: /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of C */
1397: PetscFreeSpaceGet(Crmax,&free_space);
1398: current_space = free_space;
1400: PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);
1401: for (k=0; k<nrecv; k++) {
1402: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1403: nrows = *buf_ri_k[k];
1404: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th received i-structure */
1405: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1406: }
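/* buf_ri_k[k], nextrow[k], and nextci[k] walk the k-th received i-structure in
   step with the row loop below: whenever local row i matches *nextrow[k], the
   corresponding slice of buf_rj[k] is merged into the linked list for row i. */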
1408: MatPreallocateInitialize(comm,pn,an,dnz,onz);
1409: PetscLLCondensedCreate(Crmax,aN,&lnk,&lnkbt);
1410: for (i=0; i<pn; i++) {
1411: /* add C_loc into C */
1412: nzi = c_loc->i[i+1] - c_loc->i[i];
1413: Jptr = c_loc->j + c_loc->i[i];
1414: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
1416: /* add received col data into lnk */
1417: for (k=0; k<nrecv; k++) { /* k-th received message */
1418: if (i == *nextrow[k]) { /* i-th row */
1419: nzi = *(nextci[k]+1) - *nextci[k];
1420: Jptr = buf_rj[k] + *nextci[k];
1421: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
1422: nextrow[k]++; nextci[k]++;
1423: }
1424: }
1425: nzi = lnk[0];
1427: /* copy data into free space, then initialize lnk */
1428: PetscLLCondensedClean(aN,nzi,current_space->array,lnk,lnkbt);
1429: MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);
1430: }
1431: PetscFree3(buf_ri_k,nextrow,nextci);
1432: PetscLLDestroy(lnk,lnkbt);
1433: PetscFreeSpaceDestroy(free_space);
1435: /* local sizes and preallocation */
1436: MatSetSizes(C,pn,an,PETSC_DETERMINE,PETSC_DETERMINE);
1437: if (P->cmap->bs > 0) {PetscLayoutSetBlockSize(C->rmap,P->cmap->bs);}
1438: if (A->cmap->bs > 0) {PetscLayoutSetBlockSize(C->cmap,A->cmap->bs);}
1439: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
1440: MatPreallocateFinalize(dnz,onz);
1442: /* add C_loc and C_oth to C */
1443: MatGetOwnershipRange(C,&rstart,NULL);
1444: for (i=0; i<pn; i++) {
1445: const PetscInt ncols = c_loc->i[i+1] - c_loc->i[i];
1446: const PetscInt *cols = c_loc->j + c_loc->i[i];
1447: const PetscInt row = rstart + i;
1448: MatSetValues(C,1,&row,ncols,cols,NULL,INSERT_VALUES);
1449: }
1450: for (i=0; i<con; i++) {
1451: const PetscInt ncols = c_oth->i[i+1] - c_oth->i[i];
1452: const PetscInt *cols = c_oth->j + c_oth->i[i];
1453: const PetscInt row = prmap[i];
1454: MatSetValues(C,1,&row,ncols,cols,NULL,INSERT_VALUES);
1455: }
1456: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1457: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1458: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
1460: /* members in merge */
1461: PetscFree(id_r);
1462: PetscFree(len_r);
1463: PetscFree(buf_ri[0]);
1464: PetscFree(buf_ri);
1465: PetscFree(buf_rj[0]);
1466: PetscFree(buf_rj);
1467: PetscLayoutDestroy(&rowmap);
1469: /* attach the supporting struct to C for reuse */
1470: C->product->data = ptap;
1471: C->product->destroy = MatDestroy_MPIAIJ_PtAP;
1472: return(0);
1473: }
1475: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,Mat C)
1476: {
1477: PetscErrorCode ierr;
1478: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data;
1479: Mat_SeqAIJ *c_seq;
1480: Mat_APMPI *ptap;
1481: Mat A_loc,C_loc,C_oth;
1482: PetscInt i,rstart,rend,cm,ncols,row;
1483: const PetscInt *cols;
1484: const PetscScalar *vals;
1487: MatCheckProduct(C,3);
1488: ptap = (Mat_APMPI*)C->product->data;
1489: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be computed. Missing data");
1490: if (!ptap->A_loc) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatProductClear()");
1491: MatZeroEntries(C);
1493: if (ptap->reuse == MAT_REUSE_MATRIX) {
1494: /* These matrices are obtained in MatTransposeMatMultSymbolic() */
1495: /* 1) get R = Pd^T, Ro = Po^T */
1496: /*----------------------------*/
1497: MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);
1498: MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);
1500: /* 2) compute numeric A_loc */
1501: /*--------------------------*/
1502: MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&ptap->A_loc);
1503: }
1505: /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
1506: A_loc = ptap->A_loc;
1507: ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,A_loc,ptap->C_loc);
1508: ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,A_loc,ptap->C_oth);
1509: C_loc = ptap->C_loc;
1510: C_oth = ptap->C_oth;
1512: /* add C_loc and C_oth to C */
1513: MatGetOwnershipRange(C,&rstart,&rend);
1515: /* C_loc -> C */
1516: cm = C_loc->rmap->N;
1517: c_seq = (Mat_SeqAIJ*)C_loc->data;
1518: cols = c_seq->j;
1519: vals = c_seq->a;
1520: for (i=0; i<cm; i++) {
1521: ncols = c_seq->i[i+1] - c_seq->i[i];
1522: row = rstart + i;
1523: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
1524: cols += ncols; vals += ncols;
1525: }
1527: /* Co -> C, off-processor part */
1528: cm = C_oth->rmap->N;
1529: c_seq = (Mat_SeqAIJ*)C_oth->data;
1530: cols = c_seq->j;
1531: vals = c_seq->a;
1532: for (i=0; i<cm; i++) {
1533: ncols = c_seq->i[i+1] - c_seq->i[i];
1534: row = p->garray[i];
1535: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
1536: cols += ncols; vals += ncols;
1537: }
1538: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1539: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1540: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
1542: ptap->reuse = MAT_REUSE_MATRIX;
1543: return(0);
1544: }
1546: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C)
1547: {
1548: PetscErrorCode ierr;
1549: Mat_Merge_SeqsToMPI *merge;
1550: Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data;
1551: Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
1552: Mat_APMPI *ptap;
1553: PetscInt *adj;
1554: PetscInt i,j,k,anz,pnz,row,*cj,nexta;
1555: MatScalar *ada,*ca,valtmp;
1556: PetscInt am=A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
1557: MPI_Comm comm;
1558: PetscMPIInt size,rank,taga,*len_s;
1559: PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
1560: PetscInt **buf_ri,**buf_rj;
1561: PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
1562: MPI_Request *s_waits,*r_waits;
1563: MPI_Status *status;
1564: MatScalar **abuf_r,*ba_i,*pA,*coa,*ba;
1565: PetscInt *ai,*aj,*coi,*coj,*poJ,*pdJ;
1566: Mat A_loc;
1567: Mat_SeqAIJ *a_loc;
1570: MatCheckProduct(C,3);
1571: ptap = (Mat_APMPI*)C->product->data;
1572: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be computed. Missing data");
1573: if (!ptap->A_loc) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatProductClear()");
1574: PetscObjectGetComm((PetscObject)C,&comm);
1575: MPI_Comm_size(comm,&size);
1576: MPI_Comm_rank(comm,&rank);
1578: merge = ptap->merge;
1580: /* 2) compute numeric C_seq = P_loc^T*A_loc */
1581: /*------------------------------------------*/
1582: /* get data from symbolic products */
1583: coi = merge->coi; coj = merge->coj;
1584: PetscCalloc1(coi[pon]+1,&coa);
1585: bi = merge->bi; bj = merge->bj;
1586: owners = merge->rowmap->range;
1587: PetscCalloc1(bi[cm]+1,&ba);
1589: /* get A_loc by taking all local rows of A */
1590: A_loc = ptap->A_loc;
1591: MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);
1592: a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1593: ai = a_loc->i;
1594: aj = a_loc->j;
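/* C = P^T*A is accumulated as a sum of sparse outer products P[i,:]^T A[i,:]
   over the local rows i of A: contributions through the diagonal block of P
   land in locally owned rows of C (ba), while those through the off-diagonal
   block belong to rows owned by other processes and are staged in coa for the
   send/receive phase below. */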
1596: for (i=0; i<am; i++) {
1597: anz = ai[i+1] - ai[i];
1598: adj = aj + ai[i];
1599: ada = a_loc->a + ai[i];
1601: /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
1602: /*-------------------------------------------------------------*/
1603: /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
1604: pnz = po->i[i+1] - po->i[i];
1605: poJ = po->j + po->i[i];
1606: pA = po->a + po->i[i];
1607: for (j=0; j<pnz; j++) {
1608: row = poJ[j];
1609: cj = coj + coi[row];
1610: ca = coa + coi[row];
1611: /* perform sparse axpy */
1612: nexta = 0;
1613: valtmp = pA[j];
1614: for (k=0; nexta<anz; k++) {
1615: if (cj[k] == adj[nexta]) {
1616: ca[k] += valtmp*ada[nexta];
1617: nexta++;
1618: }
1619: }
1620: PetscLogFlops(2.0*anz);
1621: }
1623: /* put the value into Cd (diagonal part) */
1624: pnz = pd->i[i+1] - pd->i[i];
1625: pdJ = pd->j + pd->i[i];
1626: pA = pd->a + pd->i[i];
1627: for (j=0; j<pnz; j++) {
1628: row = pdJ[j];
1629: cj = bj + bi[row];
1630: ca = ba + bi[row];
1631: /* perform sparse axpy */
1632: nexta = 0;
1633: valtmp = pA[j];
1634: for (k=0; nexta<anz; k++) {
1635: if (cj[k] == adj[nexta]) {
1636: ca[k] += valtmp*ada[nexta];
1637: nexta++;
1638: }
1639: }
1640: PetscLogFlops(2.0*anz);
1641: }
1642: }
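/*
   A small worked example of the "sparse axpy" loops above (made-up indices):
   with symbolic pattern cj = {0,3,5,7} for the target row and adj = {3,7} for
   the A_loc row, the scan visits k = 0..3, matches at k=1 and k=3, and performs
   ca[1] += valtmp*ada[0] and ca[3] += valtmp*ada[1]. This single pass is valid
   because the symbolic phase guarantees cj is sorted and contains adj; each row
   costs 2*anz flops, which is what PetscLogFlops records.
*/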
1644: /* 3) send and recv matrix values coa */
1645: /*------------------------------------*/
1646: buf_ri = merge->buf_ri;
1647: buf_rj = merge->buf_rj;
1648: len_s = merge->len_s;
1649: PetscCommGetNewTag(comm,&taga);
1650: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
1652: PetscMalloc2(merge->nsend+1,&s_waits,size,&status);
1653: for (proc=0,k=0; proc<size; proc++) {
1654: if (!len_s[proc]) continue;
1655: i = merge->owners_co[proc];
1656: MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
1657: k++;
1658: }
1659: if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
1660: if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
1662: PetscFree2(s_waits,status);
1663: PetscFree(r_waits);
1664: PetscFree(coa);
1666: /* 4) insert local Cseq and received values into Cmpi */
1667: /*----------------------------------------------------*/
1668: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
1669: for (k=0; k<merge->nrecv; k++) {
1670: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1671: nrows = *(buf_ri_k[k]);
1672: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th received i-structure */
1673: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1674: }
1676: for (i=0; i<cm; i++) {
1677: row = owners[rank] + i; /* global row index of C_seq */
1678: bj_i = bj + bi[i]; /* col indices of the i-th row of C */
1679: ba_i = ba + bi[i];
1680: bnz = bi[i+1] - bi[i];
1681: /* add received vals into ba */
1682: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1683: /* i-th row */
1684: if (i == *nextrow[k]) {
1685: cnz = *(nextci[k]+1) - *nextci[k];
1686: cj = buf_rj[k] + *(nextci[k]);
1687: ca = abuf_r[k] + *(nextci[k]);
1688: nextcj = 0;
1689: for (j=0; nextcj<cnz; j++) {
1690: if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1691: ba_i[j] += ca[nextcj++];
1692: }
1693: }
1694: nextrow[k]++; nextci[k]++;
1695: PetscLogFlops(2.0*cnz);
1696: }
1697: }
1698: MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);
1699: }
1700: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1701: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1703: PetscFree(ba);
1704: PetscFree(abuf_r[0]);
1705: PetscFree(abuf_r);
1706: PetscFree3(buf_ri_k,nextrow,nextci);
1707: return(0);
1708: }
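/*
   Usage sketch for the routine above (a minimal caller, not part of this file;
   assumes P and A are assembled MPIAIJ matrices with matching row layouts):
.vb
     Mat C;
     MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
     MatTransposeMatMult(P,A,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);
     MatDestroy(&C);
.ve
   The first call runs the symbolic routine below plus this numeric routine; the
   MAT_REUSE_MATRIX call skips the symbolic work and reruns only the numeric kernel.
*/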
1710: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat C)
1711: {
1712: PetscErrorCode ierr;
1713: Mat A_loc,POt,PDt;
1714: Mat_APMPI *ptap;
1715: PetscFreeSpaceList free_space=NULL,current_space=NULL;
1716: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data,*a=(Mat_MPIAIJ*)A->data;
1717: PetscInt *pdti,*pdtj,*poti,*potj,*ptJ;
1718: PetscInt nnz;
1719: PetscInt *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
1720: PetscInt am =A->rmap->n,pn=P->cmap->n;
1721: MPI_Comm comm;
1722: PetscMPIInt size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
1723: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
1724: PetscInt len,proc,*dnz,*onz,*owners;
1725: PetscInt nzi,*bi,*bj;
1726: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1727: MPI_Request *swaits,*rwaits;
1728: MPI_Status *sstatus,rstatus;
1729: Mat_Merge_SeqsToMPI *merge;
1730: PetscInt *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
1731: PetscReal afill =1.0,afill_tmp;
1732: PetscInt rstart = P->cmap->rstart,rmax,aN=A->cmap->N,Armax;
1733: Mat_SeqAIJ *a_loc,*pdt,*pot;
1734: PetscTable ta;
1735: MatType mtype;
1738: PetscObjectGetComm((PetscObject)A,&comm);
1739: /* check if matrix local sizes are compatible */
1740: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
1742: MPI_Comm_size(comm,&size);
1743: MPI_Comm_rank(comm,&rank);
1745: /* create struct Mat_APMPI and attach it to C later */
1746: PetscNew(&ptap);
1748: /* get A_loc by taking all local rows of A */
1749: MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);
1751: ptap->A_loc = A_loc;
1752: a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1753: ai = a_loc->i;
1754: aj = a_loc->j;
1756: /* determine symbolic Co=(p->B)^T*A - send to others */
1757: /*----------------------------------------------------*/
1758: MatTransposeSymbolic_SeqAIJ(p->A,&PDt);
1759: pdt = (Mat_SeqAIJ*)PDt->data;
1760: pdti = pdt->i; pdtj = pdt->j;
1762: MatTransposeSymbolic_SeqAIJ(p->B,&POt);
1763: pot = (Mat_SeqAIJ*)POt->data;
1764: poti = pot->i; potj = pot->j;
1766: /* then, compute symbolic Co = (p->B)^T*A */
1767: pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
1768: >= (num of nonzero rows of C_seq) - pn */
1769: PetscMalloc1(pon+1,&coi);
1770: coi[0] = 0;
1772: /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1773: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],ai[am]));
1774: PetscFreeSpaceGet(nnz,&free_space);
1775: current_space = free_space;
1777: /* create and initialize a linked list */
1778: PetscTableCreate(A->cmap->n + a->B->cmap->N,aN,&ta);
1779: MatRowMergeMax_SeqAIJ(a_loc,am,ta);
1780: PetscTableGetCount(ta,&Armax);
1782: PetscLLCondensedCreate_Scalable(Armax,&lnk);
1784: for (i=0; i<pon; i++) {
1785: pnz = poti[i+1] - poti[i];
1786: ptJ = potj + poti[i];
1787: for (j=0; j<pnz; j++) {
1788: row = ptJ[j]; /* row of A_loc == col of Pot */
1789: anz = ai[row+1] - ai[row];
1790: Jptr = aj + ai[row];
1791: /* add non-zero cols of AP into the sorted linked list lnk */
1792: PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);
1793: }
1794: nnz = lnk[0];
1796: /* If free space is not available, double the total space in the list */
1797: if (current_space->local_remaining<nnz) {
1798: PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
1799: nspacedouble++;
1800: }
1802: /* Copy data into free space, and zero out dense rows */
1803: PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);
1805: current_space->array += nnz;
1806: current_space->local_used += nnz;
1807: current_space->local_remaining -= nnz;
1809: coi[i+1] = coi[i] + nnz;
1810: }
1812: PetscMalloc1(coi[pon]+1,&coj);
1813: PetscFreeSpaceContiguous(&free_space,coj);
1814: PetscLLCondensedDestroy_Scalable(lnk); /* must destroy to get a new one for C */
1816: afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
1817: if (afill_tmp > afill) afill = afill_tmp;
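/*
   The PetscFreeSpace pattern used above grows geometrically: whenever a row's
   nonzeros do not fit in the current chunk, a new chunk of size at least
   nnz + total_array_size is chained on, so the number of reallocations
   (nspacedouble) stays logarithmic in the final size. For example (made-up
   numbers), an initial guess of 1000 entries that ends up needing 5000 entries
   triggers only a handful of extra chunks rather than thousands of reallocs.
*/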
1819: /* send j-array (coj) of Co to other processors */
1820: /*----------------------------------------------*/
1821: /* determine row ownership */
1822: PetscNew(&merge);
1823: PetscLayoutCreate(comm,&merge->rowmap);
1825: merge->rowmap->n = pn;
1826: merge->rowmap->bs = 1;
1828: PetscLayoutSetUp(merge->rowmap);
1829: owners = merge->rowmap->range;
1831: /* determine the number of messages to send, their lengths */
1832: PetscCalloc1(size,&len_si);
1833: PetscCalloc1(size,&merge->len_s);
1835: len_s = merge->len_s;
1836: merge->nsend = 0;
1838: PetscMalloc1(size+2,&owners_co);
1840: proc = 0;
1841: for (i=0; i<pon; i++) {
1842: while (prmap[i] >= owners[proc+1]) proc++;
1843: len_si[proc]++; /* num of rows in Co to be sent to [proc] */
1844: len_s[proc] += coi[i+1] - coi[i];
1845: }
1847: len = 0; /* total length of buf_si[] over all outgoing messages */
1848: owners_co[0] = 0;
1849: for (proc=0; proc<size; proc++) {
1850: owners_co[proc+1] = owners_co[proc] + len_si[proc];
1851: if (len_si[proc]) {
1852: merge->nsend++;
1853: len_si[proc] = 2*(len_si[proc] + 1);
1854: len += len_si[proc];
1855: }
1856: }
1858: /* determine the number and length of messages to receive for coi and coj */
1859: PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
1860: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
1862: /* post the Irecv and Isend of coj */
1863: PetscCommGetNewTag(comm,&tagj);
1864: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);
1865: PetscMalloc1(merge->nsend+1,&swaits);
1866: for (proc=0, k=0; proc<size; proc++) {
1867: if (!len_s[proc]) continue;
1868: i = owners_co[proc];
1869: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
1870: k++;
1871: }
1873: /* wait until receives and sends of coj are complete */
1874: PetscMalloc1(size,&sstatus);
1875: for (i=0; i<merge->nrecv; i++) {
1876: PetscMPIInt icompleted;
1877: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
1878: }
1879: PetscFree(rwaits);
1880: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
1882: /* add received column indices into table to update Armax */
1883: /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/tutorials/ex56.c! */
1884: for (k=0; k<merge->nrecv; k++) {/* k-th received message */
1885: Jptr = buf_rj[k];
1886: for (j=0; j<merge->len_r[k]; j++) {
1887: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
1888: }
1889: }
1890: PetscTableGetCount(ta,&Armax);
1891: /* printf("Armax %d, an %d + Bn %d = %d, aN %d\n",Armax,A->cmap->n,a->B->cmap->N,A->cmap->n+a->B->cmap->N,aN); */
1893: /* send and recv coi */
1894: /*-------------------*/
1895: PetscCommGetNewTag(comm,&tagi);
1896: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);
1897: PetscMalloc1(len+1,&buf_s);
1898: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1899: for (proc=0,k=0; proc<size; proc++) {
1900: if (!len_s[proc]) continue;
1901: /* form outgoing message for i-structure:
1902: buf_si[0]: nrows to be sent
1903: [1:nrows]: row index (local to the receiving process)
1904: [nrows+1:2*nrows+1]: i-structure index
1905: */
1906: /*-------------------------------------------*/
1907: nrows = len_si[proc]/2 - 1;
1908: buf_si_i = buf_si + nrows+1;
1909: buf_si[0] = nrows;
1910: buf_si_i[0] = 0;
1911: nrows = 0;
1912: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1913: nzi = coi[i+1] - coi[i];
1914: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
1915: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
1916: nrows++;
1917: }
1918: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
1919: k++;
1920: buf_si += len_si[proc];
1921: }
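/*
   Illustration of the i-structure message built above (made-up numbers): to
   send rows {2,5} of Co, with 3 and 4 nonzeros respectively, the message is
     buf_si = [2, 2,5, 0,3,7]
   i.e. nrows, then the receiver-local row indices, then nrows+1 CSR-style
   offsets whose last entry (7) equals the number of column indices sent in
   the matching coj message.
*/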
1922: i = merge->nrecv;
1923: while (i--) {
1924: PetscMPIInt icompleted;
1925: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
1926: }
1927: PetscFree(rwaits);
1928: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
1929: PetscFree(len_si);
1930: PetscFree(len_ri);
1931: PetscFree(swaits);
1932: PetscFree(sstatus);
1933: PetscFree(buf_s);
1935: /* compute the local portion of C (mpi mat) */
1936: /*------------------------------------------*/
1937: /* allocate bi array and free space for accumulating nonzero column info */
1938: PetscMalloc1(pn+1,&bi);
1939: bi[0] = 0;
1941: /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
1942: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pdti[pn],PetscIntSumTruncate(poti[pon],ai[am])));
1943: PetscFreeSpaceGet(nnz,&free_space);
1944: current_space = free_space;
1946: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
1947: for (k=0; k<merge->nrecv; k++) {
1948: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1949: nrows = *buf_ri_k[k];
1950: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th received i-structure */
1951: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1952: }
1954: PetscLLCondensedCreate_Scalable(Armax,&lnk);
1955: MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);
1956: rmax = 0;
1957: for (i=0; i<pn; i++) {
1958: /* add pdt[i,:]*AP into lnk */
1959: pnz = pdti[i+1] - pdti[i];
1960: ptJ = pdtj + pdti[i];
1961: for (j=0; j<pnz; j++) {
1962: row = ptJ[j]; /* row of AP == col of Pt */
1963: anz = ai[row+1] - ai[row];
1964: Jptr = aj + ai[row];
1965: /* add non-zero cols of AP into the sorted linked list lnk */
1966: PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);
1967: }
1969: /* add received col data into lnk */
1970: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1971: if (i == *nextrow[k]) { /* i-th row */
1972: nzi = *(nextci[k]+1) - *nextci[k];
1973: Jptr = buf_rj[k] + *nextci[k];
1974: PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);
1975: nextrow[k]++; nextci[k]++;
1976: }
1977: }
1978: nnz = lnk[0];
1980: /* if free space is not available, make more free space */
1981: if (current_space->local_remaining<nnz) {
1982: PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
1983: nspacedouble++;
1984: }
1985: /* copy data into free space, then initialize lnk */
1986: PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);
1987: MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);
1989: current_space->array += nnz;
1990: current_space->local_used += nnz;
1991: current_space->local_remaining -= nnz;
1993: bi[i+1] = bi[i] + nnz;
1994: if (nnz > rmax) rmax = nnz;
1995: }
1996: PetscFree3(buf_ri_k,nextrow,nextci);
1998: PetscMalloc1(bi[pn]+1,&bj);
1999: PetscFreeSpaceContiguous(&free_space,bj);
2000: afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
2001: if (afill_tmp > afill) afill = afill_tmp;
2002: PetscLLCondensedDestroy_Scalable(lnk);
2003: PetscTableDestroy(&ta);
2005: MatDestroy(&POt);
2006: MatDestroy(&PDt);
2008: /* create symbolic parallel matrix C - the nonzero pattern must be set at preallocation time, so it cannot be assembled in the numeric part */
2009: /*-------------------------------------------------------------------------------*/
2010: MatSetSizes(C,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);
2011: MatSetBlockSizes(C,PetscAbs(P->cmap->bs),PetscAbs(A->cmap->bs));
2012: MatGetType(A,&mtype);
2013: MatSetType(C,mtype);
2014: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
2015: MatPreallocateFinalize(dnz,onz);
2016: MatSetBlockSize(C,1);
2017: MatSetOption(C,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
2018: for (i=0; i<pn; i++) {
2019: row = i + rstart;
2020: nnz = bi[i+1] - bi[i];
2021: Jptr = bj + bi[i];
2022: MatSetValues(C,1,&row,nnz,Jptr,NULL,INSERT_VALUES);
2023: }
2024: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
2025: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
2026: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
2027: merge->bi = bi;
2028: merge->bj = bj;
2029: merge->coi = coi;
2030: merge->coj = coj;
2031: merge->buf_ri = buf_ri;
2032: merge->buf_rj = buf_rj;
2033: merge->owners_co = owners_co;
2035: /* attach the supporting struct to C for reuse */
2036: C->product->data = ptap;
2037: C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2038: ptap->merge = merge;
2040: C->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;
2042: #if defined(PETSC_USE_INFO)
2043: if (bi[pn] != 0) {
2044: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
2045: PetscInfo1(C,"Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
2046: } else {
2047: PetscInfo(C,"Empty matrix product\n");
2048: }
2049: #endif
2050: return(0);
2051: }
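/*
   On the fill diagnostics above: afill is the measured ratio of nonzeros in
   the computed part of C to nnz(P^T) + nnz(A), taken over both the off-process
   part Co and the local part Cd, with the larger ratio kept. A made-up example:
   if nnz(P^T) + nnz(A) = 1000 and the product ends up with 2300 nonzeros,
   afill = 2.3, and passing fill = 2.3 on the next symbolic call avoids the
   reallocations counted by nspacedouble.
*/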
2053: /* ---------------------------------------------------------------- */
2054: static PetscErrorCode MatProductSymbolic_AtB_MPIAIJ_MPIAIJ(Mat C)
2055: {
2057: Mat_Product *product = C->product;
2058: Mat A=product->A,B=product->B;
2059: PetscReal fill=product->fill;
2060: PetscBool flg;
2063: /* scalable */
2064: PetscStrcmp(product->alg,"scalable",&flg);
2065: if (flg) {
2066: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);
2067: goto next;
2068: }
2070: /* nonscalable */
2071: PetscStrcmp(product->alg,"nonscalable",&flg);
2072: if (flg) {
2073: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);
2074: goto next;
2075: }
2077: /* matmatmult */
2078: PetscStrcmp(product->alg,"at*b",&flg);
2079: if (flg) {
2080: Mat At;
2081: Mat_APMPI *ptap;
2083: MatTranspose(A,MAT_INITIAL_MATRIX,&At);
2084: MatMatMultSymbolic_MPIAIJ_MPIAIJ(At,B,fill,C);
2085: ptap = (Mat_APMPI*)C->product->data;
2086: if (ptap) {
2087: ptap->Pt = At;
2088: C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2089: }
2090: C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
2091: goto next;
2092: }
2094: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatProduct type is not supported");
2096: next:
2097: C->ops->productnumeric = MatProductNumeric_AtB;
2098: return(0);
2099: }
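/*
   Sketch of how the AtB symbolic routine above is reached through the
   MatProduct API (a minimal caller, not part of this file; A and B are
   assembled MPIAIJ matrices):
.vb
     Mat C;
     MatProductCreate(A,B,NULL,&C);
     MatProductSetType(C,MATPRODUCT_AtB);
     MatProductSetAlgorithm(C,"scalable");
     MatProductSetFromOptions(C);
     MatProductSymbolic(C);
     MatProductNumeric(C);
.ve
   MatProductSymbolic() dispatches to C->ops->productsymbolic, which this
   routine installs, and the numeric phase then runs MatProductNumeric_AtB.
*/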
2101: /* ---------------------------------------------------------------- */
2102: /* Set options for MatMatMultxxx_MPIAIJ_MPIAIJ */
2103: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AB(Mat C)
2104: {
2106: Mat_Product *product = C->product;
2107: Mat A=product->A,B=product->B;
2108: #if defined(PETSC_HAVE_HYPRE)
2109: const char *algTypes[4] = {"scalable","nonscalable","seqmpi","hypre"};
2110: PetscInt nalg = 4;
2111: #else
2112: const char *algTypes[3] = {"scalable","nonscalable","seqmpi"};
2113: PetscInt nalg = 3;
2114: #endif
2115: PetscInt alg = 1; /* set nonscalable algorithm as default */
2116: PetscBool flg;
2117: MPI_Comm comm;
2120: /* Check matrix local sizes */
2121: PetscObjectGetComm((PetscObject)C,&comm);
2122: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
2124: /* Set "nonscalable" as default algorithm */
2125: PetscStrcmp(C->product->alg,"default",&flg);
2126: if (flg) {
2127: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2129: /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2130: if (B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2131: MatInfo Ainfo,Binfo;
2132: PetscInt nz_local;
2133: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2135: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2136: MatGetInfo(B,MAT_LOCAL,&Binfo);
2137: nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2139: if (B->cmap->N > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2140: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2142: if (alg_scalable) {
2143: alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2144: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2145: PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,product->fill*nz_local);
2146: }
2147: }
2148: }
2150: /* Get runtime option */
2151: if (product->api_user) {
2152: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMult","Mat");
2153: PetscOptionsEList("-matmatmult_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2154: PetscOptionsEnd();
2155: } else {
2156: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AB","Mat");
2157: PetscOptionsEList("-matproduct_ab_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2158: PetscOptionsEnd();
2159: }
2160: if (flg) {
2161: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2162: }
2164: C->ops->productsymbolic = MatProductSymbolic_AB_MPIAIJ_MPIAIJ;
2165: return(0);
2166: }
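/*
   The algorithm can also be chosen at runtime via the options registered
   above, e.g.
.vb
     -matmatmult_via scalable
     -matproduct_ab_via seqmpi
.ve
   the first spelling applies when going through MatMatMult(), the second when
   driving the MatProduct API directly.
*/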
2168: /* Set options for MatTransposeMatMultXXX_MPIAIJ_MPIAIJ */
2169: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AtB(Mat C)
2170: {
2172: Mat_Product *product = C->product;
2173: Mat A=product->A,B=product->B;
2174: const char *algTypes[3] = {"scalable","nonscalable","at*b"};
2175: PetscInt nalg = 3;
2176: PetscInt alg = 1; /* set default algorithm */
2177: PetscBool flg;
2178: MPI_Comm comm;
2181: /* Check matrix local sizes */
2182: PetscObjectGetComm((PetscObject)C,&comm);
2183: if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != B (%D,%D)",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);
2185: /* Set default algorithm */
2186: PetscStrcmp(C->product->alg,"default",&flg);
2187: if (flg) {
2188: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2189: }
2191: /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2192: if (alg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2193: MatInfo Ainfo,Binfo;
2194: PetscInt nz_local;
2195: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2197: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2198: MatGetInfo(B,MAT_LOCAL,&Binfo);
2199: nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2201: if (B->cmap->N > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2202: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2204: if (alg_scalable) {
2205: alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2206: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2207: PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,product->fill*nz_local);
2208: }
2209: }
2211: /* Get runtime option */
2212: if (product->api_user) {
2213: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatTransposeMatMult","Mat");
2214: PetscOptionsEList("-mattransposematmult_via","Algorithmic approach","MatTransposeMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2215: PetscOptionsEnd();
2216: } else {
2217: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AtB","Mat");
2218: PetscOptionsEList("-matproduct_atb_via","Algorithmic approach","MatTransposeMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2219: PetscOptionsEnd();
2220: }
2221: if (flg) {
2222: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2223: }
2225: C->ops->productsymbolic = MatProductSymbolic_AtB_MPIAIJ_MPIAIJ;
2226: return(0);
2227: }
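/*
   Runtime selection mirrors the AB case, e.g. -mattransposematmult_via at*b or
   -matproduct_atb_via scalable. The "at*b" choice explicitly forms A^T with
   MatTranspose(), which costs extra memory but lets it reuse the A*B kernels.
*/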
2229: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_PtAP(Mat C)
2230: {
2232: Mat_Product *product = C->product;
2233: Mat A=product->A,P=product->B;
2234: MPI_Comm comm;
2235: PetscBool flg;
2236: PetscInt alg=1; /* set default algorithm */
2237: #if !defined(PETSC_HAVE_HYPRE)
2238: const char *algTypes[4] = {"scalable","nonscalable","allatonce","allatonce_merged"};
2239: PetscInt nalg=4;
2240: #else
2241: const char *algTypes[5] = {"scalable","nonscalable","allatonce","allatonce_merged","hypre"};
2242: PetscInt nalg=5;
2243: #endif
2244: PetscInt pN=P->cmap->N;
2247: /* Check matrix local sizes */
2248: PetscObjectGetComm((PetscObject)C,&comm);
2249: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
2250: if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
2252: /* Set "nonscalable" as default algorithm */
2253: PetscStrcmp(C->product->alg,"default",&flg);
2254: if (flg) {
2255: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2257: /* Set "scalable" as default if pN and local nonzeros of A and P are large */
2258: if (pN > 100000) {
2259: MatInfo Ainfo,Pinfo;
2260: PetscInt nz_local;
2261: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2263: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2264: MatGetInfo(P,MAT_LOCAL,&Pinfo);
2265: nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
2267: if (pN > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2268: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2270: if (alg_scalable) {
2271: alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2272: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2273: }
2274: }
2275: }
2277: /* Get runtime option */
2278: if (product->api_user) {
2279: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatPtAP","Mat");
2280: PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);
2281: PetscOptionsEnd();
2282: } else {
2283: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_PtAP","Mat");
2284: PetscOptionsEList("-matproduct_ptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);
2285: PetscOptionsEnd();
2286: }
2287: if (flg) {
2288: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2289: }
2291: C->ops->productsymbolic = MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ;
2292: return(0);
2293: }
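/*
   Example runtime choices for PtAP: -matptap_via allatonce (or
   -matproduct_ptap_via allatonce); the "hypre" entry is only compiled in when
   PETSc is configured with hypre, as the #if above shows.
*/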
2295: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_RARt(Mat C)
2296: {
2297: Mat_Product *product = C->product;
2298: Mat A = product->A,R=product->B;
2301: /* Check matrix local sizes */
2302: if (A->cmap->n != R->cmap->n || A->rmap->n != R->cmap->n) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A local (%D, %D), R local (%D,%D)",A->rmap->n,A->cmap->n,R->rmap->n,R->cmap->n);
2304: C->ops->productsymbolic = MatProductSymbolic_RARt_MPIAIJ_MPIAIJ;
2305: return(0);
2306: }
2308: /*
2309: Set options for ABC = A*B*C = A*(B*C); the algorithm for ABC must be chosen from the algorithms available for AB
2310: */
2311: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABC(Mat C)
2312: {
2314: Mat_Product *product = C->product;
2315: PetscBool flg = PETSC_FALSE;
2316: PetscInt alg = 1; /* default algorithm */
2317: const char *algTypes[3] = {"scalable","nonscalable","seqmpi"};
2318: PetscInt nalg = 3;
2321: /* Set default algorithm */
2322: PetscStrcmp(C->product->alg,"default",&flg);
2323: if (flg) {
2324: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2325: }
2327: /* Get runtime option */
2328: if (product->api_user) {
2329: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMatMult","Mat");
2330: PetscOptionsEList("-matmatmatmult_via","Algorithmic approach","MatMatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2331: PetscOptionsEnd();
2332: } else {
2333: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_ABC","Mat");
2334: PetscOptionsEList("-matproduct_abc_via","Algorithmic approach","MatProduct_ABC",algTypes,nalg,algTypes[alg],&alg,&flg);
2335: PetscOptionsEnd();
2336: }
2337: if (flg) {
2338: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2339: }
2341: C->ops->matmatmultsymbolic = MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ;
2342: C->ops->productsymbolic = MatProductSymbolic_ABC;
2343: return(0);
2344: }
2346: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat C)
2347: {
2349: Mat_Product *product = C->product;
2352: switch (product->type) {
2353: case MATPRODUCT_AB:
2354: MatProductSetFromOptions_MPIAIJ_AB(C);
2355: break;
2356: case MATPRODUCT_AtB:
2357: MatProductSetFromOptions_MPIAIJ_AtB(C);
2358: break;
2359: case MATPRODUCT_PtAP:
2360: MatProductSetFromOptions_MPIAIJ_PtAP(C);
2361: break;
2362: case MATPRODUCT_RARt:
2363: MatProductSetFromOptions_MPIAIJ_RARt(C);
2364: break;
2365: case MATPRODUCT_ABC:
2366: MatProductSetFromOptions_MPIAIJ_ABC(C);
2367: break;
2368: default:
2369: break;
2370: }
2371: return(0);
2372: }
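/*
   End-to-end usage sketch for the products dispatched above (illustrative, not
   part of this file; assumes A, B, and P are assembled MPIAIJ matrices of
   compatible sizes):
.vb
     Mat C,D;
     MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
     MatPtAP(A,P,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&D);
     MatDestroy(&C);
     MatDestroy(&D);
.ve
   Both convenience wrappers create a MatProduct internally, which lands in
   MatProductSetFromOptions_MPIAIJ() and from there in the symbolic and numeric
   kernels of this file.
*/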