/* Actual source code: mpiptap.c (petsc-3.11.0, 2019-03-29) */
2: /*
3: Defines projective product routines where A is an MPIAIJ matrix
4: C = P^T * A * P
5: */
7: #include <../src/mat/impls/aij/seq/aij.h>
8: #include <../src/mat/utils/freespace.h>
9: #include <../src/mat/impls/aij/mpi/mpiaij.h>
10: #include <petscbt.h>
11: #include <petsctime.h>
13: /* #define PTAP_PROFILE */
15: PetscErrorCode MatView_MPIAIJ_PtAP(Mat A,PetscViewer viewer)
16: {
17: PetscErrorCode ierr;
18: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
19: Mat_APMPI *ptap=a->ap;
20: PetscBool iascii;
21: PetscViewerFormat format;
24: if (!ptap) {
25: /* hack: MatDuplicate() sets oldmat->ops->view to newmat which is a base mat class with null ptap! */
26: A->ops->view = MatView_MPIAIJ;
27: (A->ops->view)(A,viewer);
28: return(0);
29: }
31: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
32: if (iascii) {
33: PetscViewerGetFormat(viewer,&format);
34: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
35: if (ptap->algType == 0) {
36: PetscViewerASCIIPrintf(viewer,"using scalable MatPtAP() implementation\n");
37: } else if (ptap->algType == 1) {
38: PetscViewerASCIIPrintf(viewer,"using nonscalable MatPtAP() implementation\n");
39: }
40: }
41: }
42: (ptap->view)(A,viewer);
43: return(0);
44: }
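
/* Usage sketch (not part of mpiptap.c, added for illustration): viewing the product
   matrix with an ASCII viewer in INFO format reaches MatView_MPIAIJ_PtAP() above and
   reports which PtAP implementation was selected. "ReportPtAPAlgorithm" is a
   hypothetical helper; error checking is elided to match this listing. */
static PetscErrorCode ReportPtAPAlgorithm(Mat C)
{
  PetscViewerPushFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_INFO);
  MatView(C,PETSC_VIEWER_STDOUT_WORLD);   /* prints "using (non)scalable MatPtAP() implementation" */
  PetscViewerPopFormat(PETSC_VIEWER_STDOUT_WORLD);
  return(0);
}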
46: PetscErrorCode MatFreeIntermediateDataStructures_MPIAIJ_AP(Mat A)
47: {
48: PetscErrorCode ierr;
49: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
50: Mat_APMPI *ptap=a->ap;
51: Mat_Merge_SeqsToMPI *merge;
54: if (!ptap) return(0);
56: PetscFree2(ptap->startsj_s,ptap->startsj_r);
57: PetscFree(ptap->bufa);
58: MatDestroy(&ptap->P_loc);
59: MatDestroy(&ptap->P_oth);
60: MatDestroy(&ptap->A_loc); /* used by MatTransposeMatMult() */
61: MatDestroy(&ptap->Rd);
62: MatDestroy(&ptap->Ro);
63: if (ptap->AP_loc) { /* used by alg_rap */
64: Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
65: PetscFree(ap->i);
66: PetscFree2(ap->j,ap->a);
67: MatDestroy(&ptap->AP_loc);
68: } else { /* used by alg_ptap */
69: PetscFree(ptap->api);
70: PetscFree(ptap->apj);
71: }
72: MatDestroy(&ptap->C_loc);
73: MatDestroy(&ptap->C_oth);
74: if (ptap->apa) {PetscFree(ptap->apa);}
76: MatDestroy(&ptap->Pt);
78: merge=ptap->merge;
79: if (merge) { /* used by alg_ptap */
80: PetscFree(merge->id_r);
81: PetscFree(merge->len_s);
82: PetscFree(merge->len_r);
83: PetscFree(merge->bi);
84: PetscFree(merge->bj);
85: PetscFree(merge->buf_ri[0]);
86: PetscFree(merge->buf_ri);
87: PetscFree(merge->buf_rj[0]);
88: PetscFree(merge->buf_rj);
89: PetscFree(merge->coi);
90: PetscFree(merge->coj);
91: PetscFree(merge->owners_co);
92: PetscLayoutDestroy(&merge->rowmap);
93: PetscFree(ptap->merge);
94: }
95: ISLocalToGlobalMappingDestroy(&ptap->ltog);
97: A->ops->destroy = ptap->destroy;
98: PetscFree(a->ap);
99: return(0);
100: }
102: PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
103: {
107: (*A->ops->freeintermediatedatastructures)(A);
108: (*A->ops->destroy)(A);
109: return(0);
110: }
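
/* Usage sketch (not part of mpiptap.c, added for illustration): once C = P^T*A*P will no
   longer be refreshed for new values of A or P, the Mat_APMPI scaffolding freed above can
   be released explicitly through the public interface; the command-line option
   -mat_freeintermediatedatastructures has the same effect after the numeric phase.
   "DropPtAPScaffolding" is a hypothetical helper; error checking is elided. */
static PetscErrorCode DropPtAPScaffolding(Mat C)
{
  MatFreeIntermediateDataStructures(C);   /* C itself stays valid for MatMult(), MatView(), ... */
  return(0);
}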
112: PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
113: {
115: PetscBool flg;
116: MPI_Comm comm;
117: #if !defined(PETSC_HAVE_HYPRE)
118: const char *algTypes[2] = {"scalable","nonscalable"};
119: PetscInt nalg=2;
120: #else
121: const char *algTypes[3] = {"scalable","nonscalable","hypre"};
122: PetscInt nalg=3;
123: #endif
124: PetscInt pN=P->cmap->N,alg=1; /* set default algorithm */
127: /* check if matrix local sizes are compatible */
128: PetscObjectGetComm((PetscObject)A,&comm);
129: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
130: if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
132: if (scall == MAT_INITIAL_MATRIX) {
133: /* pick an algorithm */
134: PetscOptionsBegin(PetscObjectComm((PetscObject)A),((PetscObject)A)->prefix,"MatPtAP","Mat");
135: PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);
136: PetscOptionsEnd();
138: if (!flg && pN > 100000) { /* may switch to scalable algorithm as default */
139: MatInfo Ainfo,Pinfo;
140: PetscInt nz_local;
141: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
143: MatGetInfo(A,MAT_LOCAL,&Ainfo);
144: MatGetInfo(P,MAT_LOCAL,&Pinfo);
145: nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
147: if (pN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
148: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
150: if (alg_scalable) {
151: alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
152: }
153: }
155: switch (alg) {
156: case 1:
157: /* do R=P^T locally, then C=R*A*P -- nonscalable */
158: PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
159: MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);
160: PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
161: break;
162: #if defined(PETSC_HAVE_HYPRE)
163: case 2:
164: /* Use boomerAMGBuildCoarseOperator */
165: PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
166: MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);
167: PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
168: break;
169: #endif
170: default:
171: /* do R=P^T locally, then C=R*A*P */
172: PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
173: MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,fill,C);
174: PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
175: break;
176: }
178: if (alg == 0 || alg == 1) {
179: Mat_MPIAIJ *c = (Mat_MPIAIJ*)(*C)->data;
180: Mat_APMPI *ap = c->ap;
181: PetscOptionsBegin(PetscObjectComm((PetscObject)(*C)),((PetscObject)(*C))->prefix,"MatFreeIntermediateDataStructures","Mat");
182: ap->freestruct = PETSC_FALSE;
183: PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures", "MatFreeIntermediateDataStructures",ap->freestruct,&ap->freestruct, NULL);
184: PetscOptionsEnd();
185: }
186: }
188: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
189: (*(*C)->ops->ptapnumeric)(A,P,*C);
190: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
191: return(0);
192: }
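
/* Usage sketch (not part of mpiptap.c, added for illustration): a typical call sequence
   that reaches the dispatcher above. A and P are assumed to be assembled MPIAIJ matrices
   with compatible layouts, and fill is the usual estimate of nnz(C)/(nnz(A)+nnz(P)).
   The algorithm can be forced at run time with -matptap_via scalable|nonscalable
   (or hypre when PETSc is built with hypre). "FormGalerkinProduct" is a hypothetical
   helper; error checking is elided. */
static PetscErrorCode FormGalerkinProduct(Mat A,Mat P,PetscReal fill,Mat *C)
{
  MatPtAP(A,P,MAT_INITIAL_MATRIX,fill,C);   /* symbolic phase + first numeric product */
  /* ... the values of A change, its nonzero pattern does not ... */
  MatPtAP(A,P,MAT_REUSE_MATRIX,fill,C);     /* numeric phase only, reuses the attached Mat_APMPI */
  return(0);
}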
194: PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,Mat C)
195: {
196: PetscErrorCode ierr;
197: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
198: Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
199: Mat_SeqAIJ *ap,*p_loc,*p_oth,*c_seq;
200: Mat_APMPI *ptap = c->ap;
201: Mat AP_loc,C_loc,C_oth;
202: PetscInt i,rstart,rend,cm,ncols,row,*api,*apj,am = A->rmap->n,apnz,nout;
203: PetscScalar *apa;
204: const PetscInt *cols;
205: const PetscScalar *vals;
208: if (!ptap) {
209: MPI_Comm comm;
210: PetscObjectGetComm((PetscObject)C,&comm);
211: SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
212: }
214: MatZeroEntries(C);
216: /* 1) get R = Pd^T,Ro = Po^T */
217: if (ptap->reuse == MAT_REUSE_MATRIX) {
218: MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);
219: MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);
220: }
222: /* 2) get AP_loc */
223: AP_loc = ptap->AP_loc;
224: ap = (Mat_SeqAIJ*)AP_loc->data;
226: /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
227: /*-----------------------------------------------------*/
228: if (ptap->reuse == MAT_REUSE_MATRIX) {
229: /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
230: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
231: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
232: }
234: /* 2-2) compute numeric A_loc*P - dominating part */
235: /* ---------------------------------------------- */
236: /* get data from symbolic products */
237: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
238: if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
239: else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ptap->P_oth is NULL. Cannot proceed!");
241: api = ap->i;
242: apj = ap->j;
243: ISLocalToGlobalMappingApply(ptap->ltog,api[AP_loc->rmap->n],apj,apj);
244: for (i=0; i<am; i++) {
245: /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
246: apnz = api[i+1] - api[i];
247: apa = ap->a + api[i];
248: PetscMemzero(apa,sizeof(PetscScalar)*apnz);
249: AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
250: PetscLogFlops(2.0*apnz);
251: }
252: ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,api[AP_loc->rmap->n],apj,&nout,apj);
253: if (api[AP_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",api[AP_loc->rmap->n],nout);
255: /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
256: /* Always use scalable version since we are in the MPI scalable version */
257: MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Rd,AP_loc,ptap->C_loc);
258: MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Ro,AP_loc,ptap->C_oth);
260: C_loc = ptap->C_loc;
261: C_oth = ptap->C_oth;
263: /* add C_loc and Co to C */
264: MatGetOwnershipRange(C,&rstart,&rend);
266: /* C_loc -> C */
267: cm = C_loc->rmap->N;
268: c_seq = (Mat_SeqAIJ*)C_loc->data;
269: cols = c_seq->j;
270: vals = c_seq->a;
271: ISLocalToGlobalMappingApply(ptap->ltog,c_seq->i[C_loc->rmap->n],c_seq->j,c_seq->j);
273: /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE and */
274: /* when there are no off-processor parts. */
275: /* If was_assembled is true, then the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat */
276: /* is no longer true. Then the more complex function MatSetValues_MPIAIJ() has to be used, where the column index is looked up from */
277: /* a table, and other, more complex stuff has to be done. */
278: if (C->assembled) {
279: C->was_assembled = PETSC_TRUE;
280: C->assembled = PETSC_FALSE;
281: }
282: if (C->was_assembled) {
283: for (i=0; i<cm; i++) {
284: ncols = c_seq->i[i+1] - c_seq->i[i];
285: row = rstart + i;
286: MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);
287: cols += ncols; vals += ncols;
288: }
289: } else {
290: MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);
291: }
292: ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_seq->i[C_loc->rmap->n],c_seq->j,&nout,c_seq->j);
293: if (c_seq->i[C_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_seq->i[C_loc->rmap->n],nout);
295: /* Co -> C, off-processor part */
296: cm = C_oth->rmap->N;
297: c_seq = (Mat_SeqAIJ*)C_oth->data;
298: cols = c_seq->j;
299: vals = c_seq->a;
300: ISLocalToGlobalMappingApply(ptap->ltog,c_seq->i[C_oth->rmap->n],c_seq->j,c_seq->j);
301: for (i=0; i<cm; i++) {
302: ncols = c_seq->i[i+1] - c_seq->i[i];
303: row = p->garray[i];
304: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
305: cols += ncols; vals += ncols;
306: }
307: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
308: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
310: ptap->reuse = MAT_REUSE_MATRIX;
312: ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_seq->i[C_oth->rmap->n],c_seq->j,&nout,c_seq->j);
313: if (c_seq->i[C_oth->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_seq->i[C_oth->rmap->n],nout);
315: /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be updated with new A and P */
316: if (ptap->freestruct) {
317: MatFreeIntermediateDataStructures(C);
318: }
319: return(0);
320: }
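
/* Illustrative sketch (not part of mpiptap.c): the numeric routine above keeps AP_loc with
   compacted column indices plus the mapping ptap->ltog, expands them to global indices
   around the row products, and contracts them again afterwards. The round trip for an
   arbitrary index array looks as follows; "RoundTripColumnIndices" and its arguments are
   hypothetical, and error checking is elided. */
static PetscErrorCode RoundTripColumnIndices(ISLocalToGlobalMapping ltog,PetscInt n,PetscInt *idx)
{
  PetscInt nout;

  ISLocalToGlobalMappingApply(ltog,n,idx,idx);                      /* compact (local) -> global, in place */
  /* ... work with global column indices here ... */
  ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,n,idx,&nout,idx);  /* global -> compact again */
  if (n != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D",n,nout);
  return(0);
}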
322: PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,PetscReal fill,Mat *C)
323: {
324: PetscErrorCode ierr;
325: Mat_APMPI *ptap;
326: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
327: MPI_Comm comm;
328: PetscMPIInt size,rank;
329: Mat Cmpi,P_loc,P_oth;
330: PetscFreeSpaceList free_space=NULL,current_space=NULL;
331: PetscInt am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
332: PetscInt *lnk,i,k,pnz,row,nsend;
333: PetscMPIInt tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
334: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
335: PetscInt len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
336: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
337: MPI_Request *swaits,*rwaits;
338: MPI_Status *sstatus,rstatus;
339: PetscLayout rowmap;
340: PetscInt *owners_co,*coi,*coj; /* i and j array of (p->B)^T*A*P - used in the communication */
341: PetscMPIInt *len_r,*id_r; /* lengths and ranks of the messages received - used in the communication */
342: PetscInt *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,Crmax,*aj,*ai,*pi,nout;
343: Mat_SeqAIJ *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
344: PetscScalar *apv;
345: PetscTable ta;
346: MatType mtype;
347: const char *prefix;
348: #if defined(PETSC_USE_INFO)
349: PetscReal apfill;
350: #endif
353: PetscObjectGetComm((PetscObject)A,&comm);
354: MPI_Comm_size(comm,&size);
355: MPI_Comm_rank(comm,&rank);
357: if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;
359: /* create symbolic parallel matrix Cmpi */
360: MatCreate(comm,&Cmpi);
361: MatGetType(A,&mtype);
362: MatSetType(Cmpi,mtype);
364: /* create struct Mat_APMPI and attach it to C later */
365: PetscNew(&ptap);
366: ptap->reuse = MAT_INITIAL_MATRIX;
367: ptap->algType = 0;
369: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
370: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&P_oth);
371: /* get P_loc by taking all local rows of P */
372: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&P_loc);
374: ptap->P_loc = P_loc;
375: ptap->P_oth = P_oth;
377: /* (0) compute Rd = Pd^T, Ro = Po^T */
378: /* --------------------------------- */
379: MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);
380: MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);
382: /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
383: /* ----------------------------------------------------------------- */
384: p_loc = (Mat_SeqAIJ*)P_loc->data;
385: if (P_oth) p_oth = (Mat_SeqAIJ*)P_oth->data;
387: /* create and initialize a linked list */
388: PetscTableCreate(pn,pN,&ta); /* for computing AP_loc and Cmpi */
389: MatRowMergeMax_SeqAIJ(p_loc,P_loc->rmap->N,ta);
390: MatRowMergeMax_SeqAIJ(p_oth,P_oth->rmap->N,ta);
391: PetscTableGetCount(ta,&Crmax); /* Crmax = nnz(sum of Prows) */
393: PetscLLCondensedCreate_Scalable(Crmax,&lnk);
395: /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
396: if (ao) {
397: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);
398: } else {
399: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);
400: }
401: current_space = free_space;
402: nspacedouble = 0;
404: PetscMalloc1(am+1,&api);
405: api[0] = 0;
406: for (i=0; i<am; i++) {
407: /* diagonal portion: Ad[i,:]*P */
408: ai = ad->i; pi = p_loc->i;
409: nzi = ai[i+1] - ai[i];
410: aj = ad->j + ai[i];
411: for (j=0; j<nzi; j++) {
412: row = aj[j];
413: pnz = pi[row+1] - pi[row];
414: Jptr = p_loc->j + pi[row];
415: /* add non-zero cols of P into the sorted linked list lnk */
416: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
417: }
418: /* off-diagonal portion: Ao[i,:]*P */
419: if (ao) {
420: ai = ao->i; pi = p_oth->i;
421: nzi = ai[i+1] - ai[i];
422: aj = ao->j + ai[i];
423: for (j=0; j<nzi; j++) {
424: row = aj[j];
425: pnz = pi[row+1] - pi[row];
426: Jptr = p_oth->j + pi[row];
427: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
428: }
429: }
430: apnz = lnk[0];
431: api[i+1] = api[i] + apnz;
433: /* if free space is not available, double the total space in the list */
434: if (current_space->local_remaining<apnz) {
435: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
436: nspacedouble++;
437: }
439: /* Copy data into free space, then initialize lnk */
440: PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);
442: current_space->array += apnz;
443: current_space->local_used += apnz;
444: current_space->local_remaining -= apnz;
445: }
446: /* Allocate space for apj and apv, initialize apj, and */
447: /* destroy list of free space and other temporary array(s) */
448: PetscCalloc2(api[am],&apj,api[am],&apv);
449: PetscFreeSpaceContiguous(&free_space,apj);
450: PetscLLCondensedDestroy_Scalable(lnk);
452: /* Create AP_loc for reuse */
453: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);
454: MatSeqAIJCompactOutExtraColumns_SeqAIJ(ptap->AP_loc, &ptap->ltog);
456: #if defined(PETSC_USE_INFO)
457: if (ao) {
458: apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
459: } else {
460: apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
461: }
462: ptap->AP_loc->info.mallocs = nspacedouble;
463: ptap->AP_loc->info.fill_ratio_given = fill;
464: ptap->AP_loc->info.fill_ratio_needed = apfill;
466: if (api[am]) {
467: PetscInfo3(ptap->AP_loc,"Scalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);
468: PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,B,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);
469: } else {
470: PetscInfo(ptap->AP_loc,"Scalable algorithm, AP_loc is empty \n");
471: }
472: #endif
474: /* (2-1) compute symbolic Co = Ro*AP_loc */
475: /* ------------------------------------ */
476: MatGetOptionsPrefix(A,&prefix);
477: MatSetOptionsPrefix(ptap->Ro,prefix);
478: MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");
479: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);
481: /* (3) send coj of C_oth to other processors */
482: /* ------------------------------------------ */
483: /* determine row ownership */
484: PetscLayoutCreate(comm,&rowmap);
485: rowmap->n = pn;
486: rowmap->bs = 1;
487: PetscLayoutSetUp(rowmap);
488: owners = rowmap->range;
490: /* determine the number of messages to send, their lengths */
491: PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);
492: PetscMemzero(len_s,size*sizeof(PetscMPIInt));
493: PetscMemzero(len_si,size*sizeof(PetscMPIInt));
495: c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
496: coi = c_oth->i; coj = c_oth->j;
497: con = ptap->C_oth->rmap->n;
498: proc = 0;
499: ISLocalToGlobalMappingApply(ptap->ltog,coi[con],coj,coj);
500: for (i=0; i<con; i++) {
501: while (prmap[i] >= owners[proc+1]) proc++;
502: len_si[proc]++; /* num of rows in Co(=Pt*AP) to be sent to [proc] */
503: len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
504: }
506: len = 0; /* max length of buf_si[], see (4) */
507: owners_co[0] = 0;
508: nsend = 0;
509: for (proc=0; proc<size; proc++) {
510: owners_co[proc+1] = owners_co[proc] + len_si[proc];
511: if (len_s[proc]) {
512: nsend++;
513: len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
514: len += len_si[proc];
515: }
516: }
518: /* determine the number and length of messages to receive for coi and coj */
519: PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);
520: PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);
522: /* post the Irecv and Isend of coj */
523: PetscCommGetNewTag(comm,&tagj);
524: PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);
525: PetscMalloc1(nsend+1,&swaits);
526: for (proc=0, k=0; proc<size; proc++) {
527: if (!len_s[proc]) continue;
528: i = owners_co[proc];
529: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
530: k++;
531: }
533: /* (2-2) compute symbolic C_loc = Rd*AP_loc */
534: /* ---------------------------------------- */
535: MatSetOptionsPrefix(ptap->Rd,prefix);
536: MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");
537: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);
538: c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
539: ISLocalToGlobalMappingApply(ptap->ltog,c_loc->i[ptap->C_loc->rmap->n],c_loc->j,c_loc->j);
541: /* receives coj are complete */
542: for (i=0; i<nrecv; i++) {
543: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
544: }
545: PetscFree(rwaits);
546: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
548: /* add received column indices into ta to update Crmax */
549: for (k=0; k<nrecv; k++) {/* k-th received message */
550: Jptr = buf_rj[k];
551: for (j=0; j<len_r[k]; j++) {
552: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
553: }
554: }
555: PetscTableGetCount(ta,&Crmax);
556: PetscTableDestroy(&ta);
558: /* (4) send and recv coi */
559: /*-----------------------*/
560: PetscCommGetNewTag(comm,&tagi);
561: PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);
562: PetscMalloc1(len+1,&buf_s);
563: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
564: for (proc=0,k=0; proc<size; proc++) {
565: if (!len_s[proc]) continue;
566: /* form outgoing message for i-structure:
567: buf_si[0]: nrows to be sent
568: [1:nrows]: row index (global)
569: [nrows+1:2*nrows+1]: i-structure index
570: */
571: /*-------------------------------------------*/
572: nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
573: buf_si_i = buf_si + nrows+1;
574: buf_si[0] = nrows;
575: buf_si_i[0] = 0;
576: nrows = 0;
577: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
578: nzi = coi[i+1] - coi[i];
579: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
580: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
581: nrows++;
582: }
583: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
584: k++;
585: buf_si += len_si[proc];
586: }
587: for (i=0; i<nrecv; i++) {
588: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
589: }
590: PetscFree(rwaits);
591: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
593: PetscFree4(len_s,len_si,sstatus,owners_co);
594: PetscFree(len_ri);
595: PetscFree(swaits);
596: PetscFree(buf_s);
598: /* (5) compute the local portion of Cmpi */
599: /* ------------------------------------------ */
600: /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of Cmpi */
601: PetscFreeSpaceGet(Crmax,&free_space);
602: current_space = free_space;
604: PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);
605: for (k=0; k<nrecv; k++) {
606: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
607: nrows = *buf_ri_k[k];
608: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
609: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
610: }
612: MatPreallocateInitialize(comm,pn,pn,dnz,onz);
613: PetscLLCondensedCreate_Scalable(Crmax,&lnk);
614: for (i=0; i<pn; i++) {
615: /* add C_loc into Cmpi */
616: nzi = c_loc->i[i+1] - c_loc->i[i];
617: Jptr = c_loc->j + c_loc->i[i];
618: PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);
620: /* add received col data into lnk */
621: for (k=0; k<nrecv; k++) { /* k-th received message */
622: if (i == *nextrow[k]) { /* i-th row */
623: nzi = *(nextci[k]+1) - *nextci[k];
624: Jptr = buf_rj[k] + *nextci[k];
625: PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);
626: nextrow[k]++; nextci[k]++;
627: }
628: }
629: nzi = lnk[0];
631: /* copy data into free space, then initialize lnk */
632: PetscLLCondensedClean_Scalable(nzi,current_space->array,lnk);
633: MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);
634: }
635: PetscFree3(buf_ri_k,nextrow,nextci);
636: PetscLLCondensedDestroy_Scalable(lnk);
637: PetscFreeSpaceDestroy(free_space);
639: /* local sizes and preallocation */
640: MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);
641: MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));
642: MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);
643: MatPreallocateFinalize(dnz,onz);
645: /* members in merge */
646: PetscFree(id_r);
647: PetscFree(len_r);
648: PetscFree(buf_ri[0]);
649: PetscFree(buf_ri);
650: PetscFree(buf_rj[0]);
651: PetscFree(buf_rj);
652: PetscLayoutDestroy(&rowmap);
654: /* attach the supporting struct to Cmpi for reuse */
655: c = (Mat_MPIAIJ*)Cmpi->data;
656: c->ap = ptap;
657: ptap->duplicate = Cmpi->ops->duplicate;
658: ptap->destroy = Cmpi->ops->destroy;
659: ptap->view = Cmpi->ops->view;
661: /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
662: Cmpi->assembled = PETSC_FALSE;
663: Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable;
664: Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
665: Cmpi->ops->view = MatView_MPIAIJ_PtAP;
666: Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
667: *C = Cmpi;
669: nout = 0;
670: ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_oth->i[ptap->C_oth->rmap->n],c_oth->j,&nout,c_oth->j);
671: if (c_oth->i[ptap->C_oth->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_oth->i[ptap->C_oth->rmap->n],nout);
672: ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_loc->i[ptap->C_loc->rmap->n],c_loc->j,&nout,c_loc->j);
673: if (c_loc->i[ptap->C_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_loc->i[ptap->C_loc->rmap->n],nout);
675: return(0);
676: }
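
/* Illustrative sketch (not part of mpiptap.c): the symbolic loop above builds row i of
   AP_loc by merging the sorted column lists of the rows of P_loc/P_oth selected by the
   nonzeros of row i of A. The same idea with a plain two-way merge instead of the
   PetscLLCondensed_*_Scalable linked list ("MergeSortedRow" is hypothetical): */
static PetscInt MergeSortedRow(PetscInt nrow,const PetscInt *row,PetscInt pnz,const PetscInt *pcols,PetscInt *out)
{
  PetscInt i = 0,j = 0,k = 0;

  while (i < nrow && j < pnz) {
    if (row[i] < pcols[j])      out[k++] = row[i++];
    else if (row[i] > pcols[j]) out[k++] = pcols[j++];
    else                        {out[k++] = row[i++]; j++;}   /* duplicate column index: keep one copy */
  }
  while (i < nrow) out[k++] = row[i++];
  while (j < pnz)  out[k++] = pcols[j++];
  return k;                                                   /* number of distinct columns in the merged row */
}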
678: PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
679: {
680: PetscErrorCode ierr;
681: Mat_APMPI *ptap;
682: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
683: MPI_Comm comm;
684: PetscMPIInt size,rank;
685: Mat Cmpi;
686: PetscFreeSpaceList free_space=NULL,current_space=NULL;
687: PetscInt am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
688: PetscInt *lnk,i,k,pnz,row,nsend;
689: PetscBT lnkbt;
690: PetscMPIInt tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
691: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
692: PetscInt len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
693: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
694: MPI_Request *swaits,*rwaits;
695: MPI_Status *sstatus,rstatus;
696: PetscLayout rowmap;
697: PetscInt *owners_co,*coi,*coj; /* i and j array of (p->B)^T*A*P - used in the communication */
698: PetscMPIInt *len_r,*id_r; /* lengths and ranks of the messages received - used in the communication */
699: PetscInt *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
700: Mat_SeqAIJ *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
701: PetscScalar *apv;
702: PetscTable ta;
703: MatType mtype;
704: const char *prefix;
705: #if defined(PETSC_USE_INFO)
706: PetscReal apfill;
707: #endif
710: PetscObjectGetComm((PetscObject)A,&comm);
711: MPI_Comm_size(comm,&size);
712: MPI_Comm_rank(comm,&rank);
714: if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;
716: /* create symbolic parallel matrix Cmpi */
717: MatCreate(comm,&Cmpi);
718: MatGetType(A,&mtype);
719: MatSetType(Cmpi,mtype);
721: /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
722: Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;
724: /* create struct Mat_APMPI and attach it to C later */
725: PetscNew(&ptap);
726: ptap->reuse = MAT_INITIAL_MATRIX;
727: ptap->algType = 1;
729: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
730: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
731: /* get P_loc by taking all local rows of P */
732: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
734: /* (0) compute Rd = Pd^T, Ro = Po^T */
735: /* --------------------------------- */
736: MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);
737: MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);
739: /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
740: /* ----------------------------------------------------------------- */
741: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
742: if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
744: /* create and initialize a linked list */
745: PetscTableCreate(pn,pN,&ta); /* for computing AP_loc and Cmpi */
746: MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
747: MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
748: PetscTableGetCount(ta,&Crmax); /* Crmax = nnz(sum of Prows) */
749: /* printf("[%d] est %d, Crmax %d; pN %d\n",rank,5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)),Crmax,pN); */
751: PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);
753: /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
754: if (ao) {
755: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);
756: } else {
757: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);
758: }
759: current_space = free_space;
760: nspacedouble = 0;
762: PetscMalloc1(am+1,&api);
763: api[0] = 0;
764: for (i=0; i<am; i++) {
765: /* diagonal portion: Ad[i,:]*P */
766: ai = ad->i; pi = p_loc->i;
767: nzi = ai[i+1] - ai[i];
768: aj = ad->j + ai[i];
769: for (j=0; j<nzi; j++) {
770: row = aj[j];
771: pnz = pi[row+1] - pi[row];
772: Jptr = p_loc->j + pi[row];
773: /* add non-zero cols of P into the sorted linked list lnk */
774: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
775: }
776: /* off-diagonal portion: Ao[i,:]*P */
777: if (ao) {
778: ai = ao->i; pi = p_oth->i;
779: nzi = ai[i+1] - ai[i];
780: aj = ao->j + ai[i];
781: for (j=0; j<nzi; j++) {
782: row = aj[j];
783: pnz = pi[row+1] - pi[row];
784: Jptr = p_oth->j + pi[row];
785: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
786: }
787: }
788: apnz = lnk[0];
789: api[i+1] = api[i] + apnz;
790: if (ap_rmax < apnz) ap_rmax = apnz;
792: /* if free space is not available, double the total space in the list */
793: if (current_space->local_remaining<apnz) {
794: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
795: nspacedouble++;
796: }
798: /* Copy data into free space, then initialize lnk */
799: PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);
801: current_space->array += apnz;
802: current_space->local_used += apnz;
803: current_space->local_remaining -= apnz;
804: }
805: /* Allocate space for apj and apv, initialize apj, and */
806: /* destroy list of free space and other temporary array(s) */
807: PetscMalloc2(api[am],&apj,api[am],&apv);
808: PetscFreeSpaceContiguous(&free_space,apj);
809: PetscLLDestroy(lnk,lnkbt);
811: /* Create AP_loc for reuse */
812: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);
814: #if defined(PETSC_USE_INFO)
815: if (ao) {
816: apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
817: } else {
818: apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
819: }
820: ptap->AP_loc->info.mallocs = nspacedouble;
821: ptap->AP_loc->info.fill_ratio_given = fill;
822: ptap->AP_loc->info.fill_ratio_needed = apfill;
824: if (api[am]) {
825: PetscInfo3(ptap->AP_loc,"Nonscalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);
826: PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,B,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);
827: } else {
828: PetscInfo(ptap->AP_loc,"Nonscalable algorithm, AP_loc is empty \n");
829: }
830: #endif
832: /* (2-1) compute symbolic Co = Ro*AP_loc */
833: /* ------------------------------------ */
834: MatGetOptionsPrefix(A,&prefix);
835: MatSetOptionsPrefix(ptap->Ro,prefix);
836: MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");
837: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);
839: /* (3) send coj of C_oth to other processors */
840: /* ------------------------------------------ */
841: /* determine row ownership */
842: PetscLayoutCreate(comm,&rowmap);
843: rowmap->n = pn;
844: rowmap->bs = 1;
845: PetscLayoutSetUp(rowmap);
846: owners = rowmap->range;
848: /* determine the number of messages to send, their lengths */
849: PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);
850: PetscMemzero(len_s,size*sizeof(PetscMPIInt));
851: PetscMemzero(len_si,size*sizeof(PetscMPIInt));
853: c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
854: coi = c_oth->i; coj = c_oth->j;
855: con = ptap->C_oth->rmap->n;
856: proc = 0;
857: for (i=0; i<con; i++) {
858: while (prmap[i] >= owners[proc+1]) proc++;
859: len_si[proc]++; /* num of rows in Co(=Pt*AP) to be sent to [proc] */
860: len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
861: }
863: len = 0; /* max length of buf_si[], see (4) */
864: owners_co[0] = 0;
865: nsend = 0;
866: for (proc=0; proc<size; proc++) {
867: owners_co[proc+1] = owners_co[proc] + len_si[proc];
868: if (len_s[proc]) {
869: nsend++;
870: len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
871: len += len_si[proc];
872: }
873: }
875: /* determine the number and length of messages to receive for coi and coj */
876: PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);
877: PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);
879: /* post the Irecv and Isend of coj */
880: PetscCommGetNewTag(comm,&tagj);
881: PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);
882: PetscMalloc1(nsend+1,&swaits);
883: for (proc=0, k=0; proc<size; proc++) {
884: if (!len_s[proc]) continue;
885: i = owners_co[proc];
886: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
887: k++;
888: }
890: /* (2-2) compute symbolic C_loc = Rd*AP_loc */
891: /* ---------------------------------------- */
892: MatSetOptionsPrefix(ptap->Rd,prefix);
893: MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");
894: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);
895: c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
897: /* receives coj are complete */
898: for (i=0; i<nrecv; i++) {
899: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
900: }
901: PetscFree(rwaits);
902: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
904: /* add received column indices into ta to update Crmax */
905: for (k=0; k<nrecv; k++) {/* k-th received message */
906: Jptr = buf_rj[k];
907: for (j=0; j<len_r[k]; j++) {
908: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
909: }
910: }
911: PetscTableGetCount(ta,&Crmax);
912: PetscTableDestroy(&ta);
914: /* (4) send and recv coi */
915: /*-----------------------*/
916: PetscCommGetNewTag(comm,&tagi);
917: PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);
918: PetscMalloc1(len+1,&buf_s);
919: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
920: for (proc=0,k=0; proc<size; proc++) {
921: if (!len_s[proc]) continue;
922: /* form outgoing message for i-structure:
923: buf_si[0]: nrows to be sent
924: [1:nrows]: row index (global)
925: [nrows+1:2*nrows+1]: i-structure index
926: */
927: /*-------------------------------------------*/
928: nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
929: buf_si_i = buf_si + nrows+1;
930: buf_si[0] = nrows;
931: buf_si_i[0] = 0;
932: nrows = 0;
933: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
934: nzi = coi[i+1] - coi[i];
935: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
936: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
937: nrows++;
938: }
939: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
940: k++;
941: buf_si += len_si[proc];
942: }
943: for (i=0; i<nrecv; i++) {
944: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
945: }
946: PetscFree(rwaits);
947: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
949: PetscFree4(len_s,len_si,sstatus,owners_co);
950: PetscFree(len_ri);
951: PetscFree(swaits);
952: PetscFree(buf_s);
954: /* (5) compute the local portion of Cmpi */
955: /* ------------------------------------------ */
956: /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of Cmpi */
957: PetscFreeSpaceGet(Crmax,&free_space);
958: current_space = free_space;
960: PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);
961: for (k=0; k<nrecv; k++) {
962: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
963: nrows = *buf_ri_k[k];
964: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
965: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
966: }
968: MatPreallocateInitialize(comm,pn,pn,dnz,onz);
969: PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);
970: for (i=0; i<pn; i++) {
971: /* add C_loc into Cmpi */
972: nzi = c_loc->i[i+1] - c_loc->i[i];
973: Jptr = c_loc->j + c_loc->i[i];
974: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
976: /* add received col data into lnk */
977: for (k=0; k<nrecv; k++) { /* k-th received message */
978: if (i == *nextrow[k]) { /* i-th row */
979: nzi = *(nextci[k]+1) - *nextci[k];
980: Jptr = buf_rj[k] + *nextci[k];
981: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
982: nextrow[k]++; nextci[k]++;
983: }
984: }
985: nzi = lnk[0];
987: /* copy data into free space, then initialize lnk */
988: PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);
989: MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);
990: }
991: PetscFree3(buf_ri_k,nextrow,nextci);
992: PetscLLDestroy(lnk,lnkbt);
993: PetscFreeSpaceDestroy(free_space);
995: /* local sizes and preallocation */
996: MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);
997: MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));
998: MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);
999: MatPreallocateFinalize(dnz,onz);
1001: /* members in merge */
1002: PetscFree(id_r);
1003: PetscFree(len_r);
1004: PetscFree(buf_ri[0]);
1005: PetscFree(buf_ri);
1006: PetscFree(buf_rj[0]);
1007: PetscFree(buf_rj);
1008: PetscLayoutDestroy(&rowmap);
1010: /* attach the supporting struct to Cmpi for reuse */
1011: c = (Mat_MPIAIJ*)Cmpi->data;
1012: c->ap = ptap;
1013: ptap->duplicate = Cmpi->ops->duplicate;
1014: ptap->destroy = Cmpi->ops->destroy;
1015: ptap->view = Cmpi->ops->view;
1016: PetscCalloc1(pN,&ptap->apa);
1018: /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
1019: Cmpi->assembled = PETSC_FALSE;
1020: Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
1021: Cmpi->ops->view = MatView_MPIAIJ_PtAP;
1022: Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
1023: *C = Cmpi;
1024: return(0);
1025: }
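
/* Illustrative sketch (not part of mpiptap.c): the nonscalable variant trades memory for
   speed by keeping a dense work array apa of length pN (PetscCalloc1(pN,&ptap->apa) above),
   so a row product becomes a scatter into apa followed by a gather along the precomputed
   sparsity pattern. Roughly ("DenseAccumulateRow" and its arguments are hypothetical): */
static void DenseAccumulateRow(PetscInt anz,const PetscInt *acols,const PetscScalar *avals,
                               const PetscInt *pi,const PetscInt *pj,const PetscScalar *pa,
                               PetscScalar *apa)
{
  PetscInt j,k;

  for (j=0; j<anz; j++) {                                            /* loop over nonzeros A[i,k0] of row i */
    PetscInt k0 = acols[j];
    for (k=pi[k0]; k<pi[k0+1]; k++) apa[pj[k]] += avals[j]*pa[k];    /* AP[i,:] += A[i,k0]*P[k0,:] */
  }
}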
1027: PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
1028: {
1029: PetscErrorCode ierr;
1030: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
1031: Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
1032: Mat_SeqAIJ *ap,*p_loc,*p_oth=NULL,*c_seq;
1033: Mat_APMPI *ptap = c->ap;
1034: Mat AP_loc,C_loc,C_oth;
1035: PetscInt i,rstart,rend,cm,ncols,row;
1036: PetscInt *api,*apj,am = A->rmap->n,j,col,apnz;
1037: PetscScalar *apa;
1038: const PetscInt *cols;
1039: const PetscScalar *vals;
1042: if (!ptap) {
1043: MPI_Comm comm;
1044: PetscObjectGetComm((PetscObject)C,&comm);
1045: SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
1046: }
1048: MatZeroEntries(C);
1049: /* 1) get R = Pd^T,Ro = Po^T */
1050: if (ptap->reuse == MAT_REUSE_MATRIX) {
1051: MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);
1052: MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);
1053: }
1055: /* 2) get AP_loc */
1056: AP_loc = ptap->AP_loc;
1057: ap = (Mat_SeqAIJ*)AP_loc->data;
1059: /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
1060: /*-----------------------------------------------------*/
1061: if (ptap->reuse == MAT_REUSE_MATRIX) {
1062: /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
1063: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
1064: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
1065: }
1067: /* 2-2) compute numeric A_loc*P - dominating part */
1068: /* ---------------------------------------------- */
1069: /* get data from symbolic products */
1070: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1071: if (ptap->P_oth) {
1072: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
1073: }
1074: apa = ptap->apa;
1075: api = ap->i;
1076: apj = ap->j;
1077: for (i=0; i<am; i++) {
1078: /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
1079: AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
1080: apnz = api[i+1] - api[i];
1081: for (j=0; j<apnz; j++) {
1082: col = apj[j+api[i]];
1083: ap->a[j+ap->i[i]] = apa[col];
1084: apa[col] = 0.0;
1085: }
1086: PetscLogFlops(2.0*apnz);
1087: }
1089: /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
1090: ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);
1091: ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);
1092: C_loc = ptap->C_loc;
1093: C_oth = ptap->C_oth;
1095: /* add C_loc and Co to C */
1096: MatGetOwnershipRange(C,&rstart,&rend);
1098: /* C_loc -> C */
1099: cm = C_loc->rmap->N;
1100: c_seq = (Mat_SeqAIJ*)C_loc->data;
1101: cols = c_seq->j;
1102: vals = c_seq->a;
1105: /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE and */
1106: /* when there are no off-processor parts. */
1107: /* If was_assembled is true, then the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat */
1108: /* is no longer true. Then the more complex function MatSetValues_MPIAIJ() has to be used, where the column index is looked up from */
1109: /* a table, and other, more complex stuff has to be done. */
1110: if (C->assembled) {
1111: C->was_assembled = PETSC_TRUE;
1112: C->assembled = PETSC_FALSE;
1113: }
1114: if (C->was_assembled) {
1115: for (i=0; i<cm; i++) {
1116: ncols = c_seq->i[i+1] - c_seq->i[i];
1117: row = rstart + i;
1118: MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);
1119: cols += ncols; vals += ncols;
1120: }
1121: } else {
1122: MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);
1123: }
1125: /* Co -> C, off-processor part */
1126: cm = C_oth->rmap->N;
1127: c_seq = (Mat_SeqAIJ*)C_oth->data;
1128: cols = c_seq->j;
1129: vals = c_seq->a;
1130: for (i=0; i<cm; i++) {
1131: ncols = c_seq->i[i+1] - c_seq->i[i];
1132: row = p->garray[i];
1133: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
1134: cols += ncols; vals += ncols;
1135: }
1137: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1138: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1140: ptap->reuse = MAT_REUSE_MATRIX;
1142: /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be updated with new A and P */
1143: if (ptap->freestruct) {
1144: MatFreeIntermediateDataStructures(C);
1145: }
1146: return(0);
1147: }
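
/* Stand-alone usage sketch (not part of mpiptap.c; it belongs in a separate example source
   file). A toy driver that builds a small tridiagonal MPIAIJ matrix A and an aggregation-style
   P, forms C = P^T*A*P through the routines in this file, and reports the algorithm used.
   Options such as -matptap_via scalable and -mat_freeintermediatedatastructures exercise the
   code paths above. Sizes and values are arbitrary; error checking is elided. */
#include <petscmat.h>

int main(int argc,char **argv)
{
  Mat      A,P,C;
  PetscInt i,rstart,rend,n = 8;          /* A is n x n, P is n x n/2 */

  PetscInitialize(&argc,&argv,NULL,NULL);

  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,n,n);
  MatSetType(A,MATMPIAIJ);
  MatSetUp(A);
  MatGetOwnershipRange(A,&rstart,&rend);
  for (i=rstart; i<rend; i++) {          /* 1D Laplacian stencil */
    MatSetValue(A,i,i,2.0,INSERT_VALUES);
    if (i > 0)   MatSetValue(A,i,i-1,-1.0,INSERT_VALUES);
    if (i < n-1) MatSetValue(A,i,i+1,-1.0,INSERT_VALUES);
  }
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

  MatCreate(PETSC_COMM_WORLD,&P);
  MatSetSizes(P,PETSC_DECIDE,PETSC_DECIDE,n,n/2);
  MatSetType(P,MATMPIAIJ);
  MatSetUp(P);
  MatGetOwnershipRange(P,&rstart,&rend);
  for (i=rstart; i<rend; i++) MatSetValue(P,i,i/2,1.0,INSERT_VALUES);  /* pair-wise aggregation */
  MatAssemblyBegin(P,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(P,MAT_FINAL_ASSEMBLY);

  MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);
  PetscViewerPushFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_INFO);
  MatView(C,PETSC_VIEWER_STDOUT_WORLD);
  PetscViewerPopFormat(PETSC_VIEWER_STDOUT_WORLD);

  MatDestroy(&C);
  MatDestroy(&P);
  MatDestroy(&A);
  PetscFinalize();
  return 0;
}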