Actual source code: neprefine.c

slepc-3.7.0 2016-05-16
  1: /*
  2:    Newton refinement for NEP, simple version.

  4:    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  5:    SLEPc - Scalable Library for Eigenvalue Problem Computations
  6:    Copyright (c) 2002-2016, Universitat Politecnica de Valencia, Spain

  8:    This file is part of SLEPc.

 10:    SLEPc is free software: you can redistribute it and/or modify it under  the
 11:    terms of version 3 of the GNU Lesser General Public License as published by
 12:    the Free Software Foundation.

 14:    SLEPc  is  distributed in the hope that it will be useful, but WITHOUT  ANY
 15:    WARRANTY;  without even the implied warranty of MERCHANTABILITY or  FITNESS
 16:    FOR  A  PARTICULAR PURPOSE. See the GNU Lesser General Public  License  for
 17:    more details.

 19:    You  should have received a copy of the GNU Lesser General  Public  License
 20:    along with SLEPc. If not, see <http://www.gnu.org/licenses/>.
 21:    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 22: */

 24: #include <slepc/private/nepimpl.h>
 25: #include <slepcblaslapack.h>

 27: #define NREF_MAXIT 10
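
/*
   Note: this simple refinement is normally requested through the NEP interface,
   e.g. NEPSetRefine(nep,NEP_REFINE_SIMPLE,npart,tol,its,scheme) or the options
   -nep_refine simple -nep_refine_partitions <npart> -nep_refine_scheme
   <schur|mbe|explicit> (illustrative calling sequence; see the NEP documentation
   for the exact interface). NREF_MAXIT is the iteration limit used when no
   maxits value is supplied.
*/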

 29: typedef struct {
 30:   PetscSubcomm  subc;
 31:   VecScatter    *scatter_id,nst;
 32:   Mat           *A;
 33:   Vec           nv,vg,v,w;
 34:   FN            *fn;
 35: } NEPSimpNRefctx;
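
/*
   NEPSimpNRefctx gathers the data needed when the refinement is split among
   nep->npart subcommunicators: subc describes the subcommunicators, A and fn
   hold redundant copies of the problem matrices and functions, scatter_id and
   nst are the scatters (with auxiliary vectors vg, v, nv) used to move
   eigenvector data between the parent communicator and the subcommunicators,
   and w is a work vector.
*/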

 37: typedef struct {
 38:   Mat          M1;
 39:   Vec          M2,M3;
 40:   PetscScalar  M4,m3;
 41: } FSubctx;
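
/*
   FSubctx is the context of the shell matrix used by the Schur refinement
   scheme; MatFSMult() below applies y = M1*x - (m3/M4)*(M3'*x)*M2, where M3'
   denotes the conjugate transpose of M3.
*/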

 45: static PetscErrorCode MatFSMult(Mat M,Vec x,Vec y)
 46: {
 48:   FSubctx        *ctx;
 49:   PetscScalar    t;
 50:   
 52:   MatShellGetContext(M,&ctx);
 53:   VecDot(x,ctx->M3,&t);
 54:   t *= ctx->m3/ctx->M4;
 55:   MatMult(ctx->M1,x,y);
 56:   VecAXPY(y,-t,ctx->M2);
 57:   return(0);
 58: }

 62: static PetscErrorCode NEPSimpleNRefSetUp(NEP nep,NEPSimpNRefctx **ctx_)
 63: {
 65:   PetscInt       i,si,j,n0,m0,nloc,*idx1,*idx2,ne;
 66:   IS             is1,is2;
 67:   NEPSimpNRefctx *ctx;
 68:   Vec            v;
 69:   PetscMPIInt    rank,size;

 72:   PetscMalloc1(1,ctx_);
 73:   ctx = *ctx_;
 74:   if (nep->npart==1) {
 75:     ctx->subc = NULL;
 76:     ctx->scatter_id = NULL;
 77:     ctx->A = nep->A;
 78:     ctx->fn = nep->f;
 79:   } else {
 80:     PetscMalloc2(nep->nt,&ctx->A,nep->npart,&ctx->scatter_id);

 82:     /* Duplicate matrices */
 83:     for (i=0;i<nep->nt;i++) {
 84:       MatCreateRedundantMatrix(nep->A[i],0,PetscSubcommChild(ctx->subc),MAT_INITIAL_MATRIX,&ctx->A[i]);
 85:     }
 86:     MatCreateVecs(ctx->A[0],&ctx->v,NULL);

 88:     /* Duplicate FNs */
 89:     PetscMalloc1(nep->nt,&ctx->fn);
 90:     for (i=0;i<nep->nt;i++) {
 91:       FNDuplicate(nep->f[i],PetscSubcommChild(ctx->subc),&ctx->fn[i]);
 92:     }

 94:     /* Create scatters for sending vectors to each subcommunicator */
 95:     BVGetColumn(nep->V,0,&v);
 96:     VecGetOwnershipRange(v,&n0,&m0);
 97:     BVRestoreColumn(nep->V,0,&v);
 98:     VecGetLocalSize(ctx->v,&nloc);
 99:     PetscMalloc2(m0-n0,&idx1,m0-n0,&idx2);
100:     VecCreateMPI(PetscObjectComm((PetscObject)nep),nloc,PETSC_DECIDE,&ctx->vg);
101:     for (si=0;si<nep->npart;si++) {
102:       j = 0;
103:       for (i=n0;i<m0;i++) {
104:         idx1[j]   = i;
105:         idx2[j++] = i+nep->n*si;
106:       }
107:       ISCreateGeneral(PetscObjectComm((PetscObject)nep),(m0-n0),idx1,PETSC_COPY_VALUES,&is1);
108:       ISCreateGeneral(PetscObjectComm((PetscObject)nep),(m0-n0),idx2,PETSC_COPY_VALUES,&is2);
109:       BVGetColumn(nep->V,0,&v);
110:       VecScatterCreate(v,is1,ctx->vg,is2,&ctx->scatter_id[si]);
111:       BVRestoreColumn(nep->V,0,&v);
112:       ISDestroy(&is1);
113:       ISDestroy(&is2);
114:     }
115:     PetscFree2(idx1,idx2);
116:   }
117:   if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT) {
118:     MPI_Comm_rank(PetscObjectComm((PetscObject)ctx->A[0]),&rank);
119:     MPI_Comm_size(PetscObjectComm((PetscObject)ctx->A[0]),&size);
120:     if (size>1) {
121:       if (nep->npart==1) {
122:         BVGetColumn(nep->V,0,&v);
123:       } else {v = ctx->v;}
124:       VecGetOwnershipRange(v,&n0,&m0);
125:       ne = (rank == size-1)?nep->n:0;
126:       VecCreateMPI(PetscObjectComm((PetscObject)ctx->A[0]),ne,PETSC_DECIDE,&ctx->nv);
127:       PetscMalloc1(m0-n0,&idx1);
128:       for (i=n0;i<m0;i++) {
129:         idx1[i-n0] = i;
130:       }
131:       ISCreateGeneral(PetscObjectComm((PetscObject)nep),(m0-n0),idx1,PETSC_COPY_VALUES,&is1);
132:       VecScatterCreate(v,is1,ctx->nv,is1,&ctx->nst);
133:       if (nep->npart==1) {
134:         BVRestoreColumn(nep->V,0,&v);
135:       }
136:       PetscFree(idx1);
137:       ISDestroy(&is1);
138:     }
 139:   }
        return(0);
140: }

142: /*
143:   Gather Eigenpair idx from subcommunicator with color sc
144: */
147: static PetscErrorCode NEPSimpleNRefGatherEigenpair(NEP nep,NEPSimpNRefctx *ctx,PetscInt sc,PetscInt idx,PetscInt *fail)
148: {
150:   PetscMPIInt    nproc,p;
151:   MPI_Comm       comm=((PetscObject)nep)->comm;
152:   Vec            v;
153:   PetscScalar    *array;

156:   if (nep->npart>1) {
157:     MPI_Comm_size(comm,&nproc);
158:     p = (nproc/nep->npart)*sc+PetscMin(sc,nproc%nep->npart);
 159:     /* Communicate whether the refinement succeeded */
160:     MPI_Bcast(fail,1,MPIU_INT,p,comm);
161:     if (!(*fail)) {
162:       /* Process 0 of subcommunicator sc broadcasts the eigenvalue */ 
163:       MPI_Bcast(&nep->eigr[idx],1,MPIU_SCALAR,p,comm);
 164:       /* Gather nep->V[idx] from the subcommunicator sc */
165:       BVGetColumn(nep->V,idx,&v);
166:       if (ctx->subc->color==sc) {
167:         VecGetArray(ctx->v,&array);
168:         VecPlaceArray(ctx->vg,array);
169:       }
170:       VecScatterBegin(ctx->scatter_id[sc],ctx->vg,v,INSERT_VALUES,SCATTER_REVERSE);
171:       VecScatterEnd(ctx->scatter_id[sc],ctx->vg,v,INSERT_VALUES,SCATTER_REVERSE);
172:       if (ctx->subc->color==sc) {
173:         VecResetArray(ctx->vg);
174:         VecRestoreArray(ctx->v,&array);
175:       }
176:       BVRestoreColumn(nep->V,idx,&v);
177:     }
178:   } else {
179:     if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT && !(*fail)) {
180:       MPI_Comm_size(comm,&nproc);
181:       p = (nproc/nep->npart)*sc+PetscMin(sc,nproc%nep->npart);
182:       MPI_Bcast(&nep->eigr[idx],1,MPIU_SCALAR,p,comm);
183:     }
184:   }
185:   return(0);
186: }
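
/*
  Scatter eigenvector idx from the parent communicator to the subcommunicator with color sc
*/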

190: static PetscErrorCode NEPSimpleNRefScatterEigenvector(NEP nep,NEPSimpNRefctx *ctx,PetscInt sc,PetscInt idx)
191: {
193:   Vec            v;
194:   PetscScalar    *array;
195:   
197:   if (nep->npart>1) {
198:     BVGetColumn(nep->V,idx,&v);
199:     if (ctx->subc->color==sc) {
200:       VecGetArray(ctx->v,&array);
201:       VecPlaceArray(ctx->vg,array);
202:     }
203:     VecScatterBegin(ctx->scatter_id[sc],v,ctx->vg,INSERT_VALUES,SCATTER_FORWARD);
204:     VecScatterEnd(ctx->scatter_id[sc],v,ctx->vg,INSERT_VALUES,SCATTER_FORWARD);
205:     if (ctx->subc->color==sc) {
206:       VecResetArray(ctx->vg);
207:       VecRestoreArray(ctx->v,&array);
208:     }
209:     BVRestoreColumn(nep->V,idx,&v);
210:   }
211:   return(0);
212: }
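
/*
  Evaluates T(lambda)=sum_i f_i(lambda)*A_i into M and w=T'(lambda)*v, and builds the
  operator T and preconditioner P for the linear solves of the selected scheme: for
  EXPLICIT, T is the bordered matrix with w as extra column and the conjugated v as
  extra row; for MBE, T and P are M itself; for SCHUR, T is a shell matrix applying
  MatFSMult() and P is a copy of M with a diagonal correction
*/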

216: static PetscErrorCode NEPSimpleNRefSetUpSystem(NEP nep,NEPSimpNRefctx *ctx,Mat *A,PetscInt idx,Mat *Mt,Mat *T,Mat *P,PetscBool ini,Vec t,Vec v)
217: {
218:   PetscErrorCode    ierr;
219:   PetscInt          i,st,ml,m0,n0,m1,mg;
220:   PetscInt          *dnz,*onz,ncols,*cols2,*nnz,nt=nep->nt;
221:   PetscScalar       zero=0.0,*coeffs,*coeffs2;
222:   PetscMPIInt       rank,size;
223:   MPI_Comm          comm;
224:   const PetscInt    *cols;
225:   const PetscScalar *vals,*array;
226:   FSubctx           *fctx;
227:   Vec               w=ctx->w;
228:   Mat               M;

231:   PetscMalloc2(nt,&coeffs,nt,&coeffs2);
232:   switch (nep->scheme) {
233:   case NEP_REFINE_SCHEME_SCHUR:
234:     if (ini) {
235:       PetscCalloc1(1,&fctx);
236:       MatGetSize(A[0],&m0,&n0);
237:       MatCreateShell(PetscObjectComm((PetscObject)A[0]),PETSC_DECIDE,PETSC_DECIDE,m0,n0,fctx,T);
238:       MatShellSetOperation(*T,MATOP_MULT,(void(*)(void))MatFSMult);
239:     } else {
240:       MatShellGetContext(*T,&fctx);
241:     }    
242:     M=fctx->M1;
243:     break;
244:   case NEP_REFINE_SCHEME_MBE:
245:     M=*T;
246:     break;
247:   case NEP_REFINE_SCHEME_EXPLICIT:
248:     M=*Mt;
249:     break;
250:   }
251:   if (ini) {
252:     MatDuplicate(A[0],MAT_COPY_VALUES,&M);
253:   } else {
254:     MatCopy(A[0],M,DIFFERENT_NONZERO_PATTERN);
255:   }
256:   for (i=0;i<nt;i++) {
257:     FNEvaluateFunction(ctx->fn[i],nep->eigr[idx],coeffs+i);
258:   }
259:   if (coeffs[0]!=1.0) {
260:     MatScale(M,coeffs[0]);
261:   }
262:   for (i=1;i<nt;i++) {
263:     MatAXPY(M,coeffs[i],A[i],(ini)?nep->mstr:SUBSET_NONZERO_PATTERN);
264:   }
265:   for (i=0;i<nt;i++) {
266:     FNEvaluateDerivative(ctx->fn[i],nep->eigr[idx],coeffs2+i);
267:   }
268:   st = 0;
269:   for (i=0;i<nt && PetscAbsScalar(coeffs2[i])==0.0;i++) st++;
270:   MatMult(A[st],v,w);
271:   if (coeffs2[st]!=1.0) {
272:     VecScale(w,coeffs2[st]);
273:   }
274:   for (i=st+1;i<nt;i++) {
275:     MatMult(A[i],v,t);
276:     VecAXPY(w,coeffs2[i],t);
277:   }

279:   switch (nep->scheme) {
280:   case NEP_REFINE_SCHEME_EXPLICIT:
281:     comm = PetscObjectComm((PetscObject)A[0]);
282:     MPI_Comm_rank(comm,&rank);
283:     MPI_Comm_size(comm,&size);
284:     MatGetSize(M,&mg,NULL);
285:     MatGetOwnershipRange(M,&m0,&m1);
286:     if (ini) {
287:       MatCreate(comm,T);
288:       MatGetLocalSize(M,&ml,NULL);
289:       if (rank==size-1) ml++;
290:       MatSetSizes(*T,ml,ml,mg+1,mg+1);
291:       MatSetFromOptions(*T);
292:       MatSetUp(*T);
 293:       /* Preallocate T (the pattern of M plus the border row and column) */
294:       if (size>1) {
295:         MatPreallocateInitialize(comm,ml,ml,dnz,onz);
296:         for (i=m0;i<m1;i++) {
297:           MatGetRow(M,i,&ncols,&cols,NULL);
298:           MatPreallocateSet(i,ncols,cols,dnz,onz);
299:           MatPreallocateSet(i,1,&mg,dnz,onz);
300:           MatRestoreRow(M,i,&ncols,&cols,NULL);
301:         }
302:         if (rank==size-1) {
303:           PetscCalloc1(mg+1,&cols2);
304:           for (i=0;i<mg+1;i++) cols2[i]=i;
305:           MatPreallocateSet(m1,mg+1,cols2,dnz,onz);
306:           PetscFree(cols2);
307:         }
308:         MatMPIAIJSetPreallocation(*T,0,dnz,0,onz);
309:         MatPreallocateFinalize(dnz,onz);
310:       } else {
311:         PetscCalloc1(mg+1,&nnz);
312:         for (i=0;i<mg;i++) {
313:           MatGetRow(M,i,&ncols,NULL,NULL);
314:           nnz[i] = ncols+1;
315:           MatRestoreRow(M,i,&ncols,NULL,NULL);
316:         }
317:         nnz[mg] = mg+1;
318:         MatSeqAIJSetPreallocation(*T,0,nnz);
319:         PetscFree(nnz);
320:       }
321:       *Mt = M;
322:       *P  = *T;
323:     }
324:   
325:     /* Set values */
326:     VecGetArrayRead(w,&array);
327:     for (i=m0;i<m1;i++) {
328:       MatGetRow(M,i,&ncols,&cols,&vals);
329:       MatSetValues(*T,1,&i,ncols,cols,vals,INSERT_VALUES);
330:       MatRestoreRow(M,i,&ncols,&cols,&vals);
331:       MatSetValues(*T,1,&i,1,&mg,array+i-m0,INSERT_VALUES);
332:     }
333:     VecRestoreArrayRead(w,&array);
334:     VecConjugate(v);
335:     MPI_Comm_size(PetscObjectComm((PetscObject)A[0]),&size);
336:     MPI_Comm_rank(PetscObjectComm((PetscObject)A[0]),&rank);
337:     if (size>1) {
338:       if (rank==size-1) {
339:         PetscMalloc1(nep->n,&cols2);
340:         for (i=0;i<nep->n;i++) cols2[i]=i;
341:       }
342:       VecScatterBegin(ctx->nst,v,ctx->nv,INSERT_VALUES,SCATTER_FORWARD);
343:       VecScatterEnd(ctx->nst,v,ctx->nv,INSERT_VALUES,SCATTER_FORWARD);
344:       VecGetArrayRead(ctx->nv,&array);
345:       if (rank==size-1) {
346:         MatSetValues(*T,1,&mg,nep->n,cols2,array,INSERT_VALUES);
347:         MatSetValues(*T,1,&mg,1,&mg,&zero,INSERT_VALUES);
348:       }
349:       VecRestoreArrayRead(ctx->nv,&array);
350:     } else {  
351:       PetscMalloc1(m1-m0,&cols2);
352:       for (i=0;i<m1-m0;i++) cols2[i]=m0+i;
353:       VecGetArrayRead(v,&array);
354:       MatSetValues(*T,1,&mg,m1-m0,cols2,array,INSERT_VALUES);
355:       MatSetValues(*T,1,&mg,1,&mg,&zero,INSERT_VALUES);
356:       VecRestoreArrayRead(v,&array);
357:     }
358:     VecConjugate(v);
359:     MatAssemblyBegin(*T,MAT_FINAL_ASSEMBLY);
360:     MatAssemblyEnd(*T,MAT_FINAL_ASSEMBLY);  
361:     PetscFree(cols2);
362:     break; 
363:   case NEP_REFINE_SCHEME_SCHUR:
364:     fctx->M2 = ctx->w;
365:     fctx->M3 = v;
366:     fctx->m3 = 0.0;
367:     for (i=1;i<nt-1;i++) fctx->m3 += PetscConj(coeffs[i])*coeffs[i];
368:     fctx->M4 = 0.0;
369:     for (i=1;i<nt-1;i++) fctx->M4 += PetscConj(coeffs[i])*coeffs2[i];
370:     fctx->M1 = M;
371:     if (ini) {
372:       MatDuplicate(M,MAT_COPY_VALUES,P);
373:     } else {
374:       MatCopy(M,*P,SAME_NONZERO_PATTERN);
375:     }
376:     VecConjugate(v);
377:     VecPointwiseMult(t,v,w);
378:     VecConjugate(v);
379:     VecScale(t,-fctx->m3/fctx->M4);
380:     MatDiagonalSet(*P,t,ADD_VALUES);
381:     break;
382:   case NEP_REFINE_SCHEME_MBE:
383:     *T = M;
384:     *P = M;
385:     break;
386:   }
387:   PetscFree2(coeffs,coeffs2);
388:   return(0);
389: }
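
/*
  Newton iterative refinement of the first k computed eigenpairs: each eigenpair is
  refined until its relative error falls below tol or the iteration limit is reached
  (*maxits, or NREF_MAXIT if maxits is NULL); when nep->npart>1 the eigenpairs are
  distributed among the subcommunicators and refined concurrently
*/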

393: PetscErrorCode NEPNewtonRefinementSimple(NEP nep,PetscInt *maxits,PetscReal tol,PetscInt k)
394: {
395:   PetscErrorCode    ierr;
396:   PetscInt          i,n,its,idx=0,*idx_sc,*its_sc,color,*fail_sc;
397:   PetscMPIInt       rank,size;
398:   Mat               Mt=NULL,T=NULL,P=NULL;
399:   MPI_Comm          comm;
400:   Vec               r,v,dv,rr=NULL,dvv=NULL,t[2];
401:   const PetscScalar *array;
402:   PetscScalar       *array2,deig=0.0,tt[2],ttt;
403:   PetscReal         norm,error;
404:   PetscBool         ini=PETSC_TRUE,sc_pend,solved=PETSC_FALSE;
405:   NEPSimpNRefctx    *ctx;
406:   FSubctx            *fctx=NULL;
407:   KSPConvergedReason reason;

410:   PetscLogEventBegin(NEP_Refine,nep,0,0,0);
411:   NEPSimpleNRefSetUp(nep,&ctx);
412:   its = (maxits)?*maxits:NREF_MAXIT;
413:   comm = (nep->npart==1)?PetscObjectComm((PetscObject)nep):PetscSubcommChild(ctx->subc);
414:   if (!nep->refineksp) { NEPRefineGetKSP(nep,&nep->refineksp); }
415:   if (nep->npart==1) {
416:     BVGetColumn(nep->V,0,&v);
417:   } else v = ctx->v;
418:   VecDuplicate(v,&ctx->w);
419:   VecDuplicate(v,&r);
420:   VecDuplicate(v,&dv);
421:   VecDuplicate(v,&t[0]);
422:   VecDuplicate(v,&t[1]);
423:   if (nep->npart==1) { BVRestoreColumn(nep->V,0,&v); }
424:   MPI_Comm_size(comm,&size);
425:   MPI_Comm_rank(comm,&rank);
426:   VecGetLocalSize(r,&n);
427:   PetscMalloc3(nep->npart,&idx_sc,nep->npart,&its_sc,nep->npart,&fail_sc);
428:   for (i=0;i<nep->npart;i++) fail_sc[i] = 0;
429:   for (i=0;i<nep->npart;i++) its_sc[i] = 0;
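  /* idx_sc[i] is the eigenpair currently assigned to subcommunicator i, its_sc[i]
     counts the refinement iterations spent on it, and fail_sc[i] is set when its
     linear solve fails so that the eigenpair is skipped */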
430:   color = (nep->npart==1)?0:ctx->subc->color;
431:    
432:   /* Loop performing iterative refinements */
433:   while (!solved) {
434:     for (i=0;i<nep->npart;i++) {
435:       sc_pend = PETSC_TRUE;
436:       if (its_sc[i]==0) {
437:         idx_sc[i] = idx++;
438:         if (idx_sc[i]>=k) {
439:           sc_pend = PETSC_FALSE;
440:         } else {
441:           NEPSimpleNRefScatterEigenvector(nep,ctx,i,idx_sc[i]);
442:         }
 443:       } else { /* Gather eigenpair from subcommunicator i */
444:         NEPSimpleNRefGatherEigenpair(nep,ctx,i,idx_sc[i],&fail_sc[i]);
445:       }
446:       while (sc_pend) {
447:         if (!fail_sc[i]) {
448:           NEPComputeError(nep,idx_sc[i],NEP_ERROR_RELATIVE,&error);
449:         }
450:         if (error<=tol || its_sc[i]>=its || fail_sc[i]) {
451:           idx_sc[i] = idx++;
452:           its_sc[i] = 0;
453:           if (idx_sc[i]<k) { NEPSimpleNRefScatterEigenvector(nep,ctx,i,idx_sc[i]); }
454:         } else {
455:           sc_pend = PETSC_FALSE;
456:           its_sc[i]++;
457:         }
458:         if (idx_sc[i]>=k) sc_pend = PETSC_FALSE;
459:       }
460:     }
461:     solved = PETSC_TRUE;
462:     for (i=0;i<nep->npart&&solved;i++) solved = PetscNot(idx_sc[i]<k);
463:     if (idx_sc[color]<k) {
464: #if !defined(PETSC_USE_COMPLEX)
 465:       if (nep->eigi[idx_sc[color]]!=0.0) SETERRQ(PetscObjectComm((PetscObject)nep),1,"Simple refinement not implemented for complex eigenvalues with real scalars");
466: #endif
467:       if (nep->npart==1) {
468:         BVGetColumn(nep->V,idx_sc[color],&v);
469:       } else v = ctx->v; 
470:       NEPSimpleNRefSetUpSystem(nep,ctx,ctx->A,idx_sc[color],&Mt,&T,&P,ini,t[0],v);
471:       KSPSetOperators(nep->refineksp,T,P);
472:       if (ini) {
473:         KSPSetFromOptions(nep->refineksp);
474:         if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT) {
475:           MatCreateVecs(T,&dvv,NULL);
476:           VecDuplicate(dvv,&rr);
477:         }
478:         ini = PETSC_FALSE;
479:       }
480:       switch (nep->scheme) {
481:       case NEP_REFINE_SCHEME_EXPLICIT:
482:         MatMult(Mt,v,r);
483:         VecGetArrayRead(r,&array);
484:         if (rank==size-1) {
485:           VecGetArray(rr,&array2);
486:           PetscMemcpy(array2,array,n*sizeof(PetscScalar));
487:           array2[n] = 0.0;
488:           VecRestoreArray(rr,&array2);
489:         } else {
490:           VecPlaceArray(rr,array);
491:         }
492:         KSPSolve(nep->refineksp,rr,dvv);
493:         KSPGetConvergedReason(nep->refineksp,&reason);
494:         if (reason>0) {
495:           if (rank != size-1) {
496:             VecResetArray(rr);
497:           }
498:           VecRestoreArrayRead(r,&array);
499:           VecGetArrayRead(dvv,&array);
500:           VecPlaceArray(dv,array);
501:           VecAXPY(v,-1.0,dv);
502:           VecNorm(v,NORM_2,&norm);
503:           VecScale(v,1.0/norm);
504:           VecResetArray(dv);
505:           if (rank==size-1) nep->eigr[idx_sc[color]] -= array[n];
506:           VecRestoreArrayRead(dvv,&array);
507:         } else fail_sc[color] = 1;
508:         break;
509:       case NEP_REFINE_SCHEME_MBE:
510:         MatMult(T,v,r);
511:         /* Mixed block elimination */
512:         VecConjugate(v);
513:         KSPSolveTranspose(nep->refineksp,v,t[0]);
514:         KSPGetConvergedReason(nep->refineksp,&reason);
515:         if (reason>0) {
516:           VecConjugate(t[0]);
517:           VecDot(ctx->w,t[0],&tt[0]);
518:           KSPSolve(nep->refineksp,ctx->w,t[1]);
519:           KSPGetConvergedReason(nep->refineksp,&reason);
520:           if (reason>0) {
521:             VecDot(t[1],v,&tt[1]);
522:             VecDot(r,t[0],&ttt);
523:             tt[0] = ttt/tt[0];
524:             VecAXPY(r,-tt[0],ctx->w);
525:             KSPSolve(nep->refineksp,r,dv);
526:             KSPGetConvergedReason(nep->refineksp,&reason);
527:             if (reason>0) {
528:               VecDot(dv,v,&ttt);
529:               tt[1] = ttt/tt[1];
530:               VecAXPY(dv,-tt[1],t[1]);
531:               deig = tt[0]+tt[1];
532:             }
533:           }
534:           VecConjugate(v);
535:           VecAXPY(v,-1.0,dv);
536:           VecNorm(v,NORM_2,&norm);
537:           VecScale(v,1.0/norm);
538:           nep->eigr[idx_sc[color]] -= deig;
539:           fail_sc[color] = 0;
540:         } else {
541:           VecConjugate(v);
542:           fail_sc[color] = 1;
543:         }
544:         break;
545:       case NEP_REFINE_SCHEME_SCHUR:
546:         MatShellGetContext(T,&fctx);
547:         MatMult(fctx->M1,v,r);
548:         KSPSolve(nep->refineksp,r,dv);
549:         KSPGetConvergedReason(nep->refineksp,&reason);
550:         if (reason>0) {
551:           VecDot(dv,v,&deig);
552:           deig *= -fctx->m3/fctx->M4;
553:           VecAXPY(v,-1.0,dv);
554:           VecNorm(v,NORM_2,&norm);
555:           VecScale(v,1.0/norm);
556:           nep->eigr[idx_sc[color]] -= deig;
557:           fail_sc[color] = 0;
558:         } else fail_sc[color] = 1;
559:         break;
560:       }
561:       if (nep->npart==1) { BVRestoreColumn(nep->V,idx_sc[color],&v); }
562:     }
563:   }
564:   VecDestroy(&t[0]);
565:   VecDestroy(&t[1]);
566:   VecDestroy(&dv);
567:   VecDestroy(&ctx->w);
568:   VecDestroy(&r);
569:   PetscFree3(idx_sc,its_sc,fail_sc);
570:   VecScatterDestroy(&ctx->nst);
571:   if (nep->npart>1) {
572:     VecDestroy(&ctx->vg);
573:     VecDestroy(&ctx->v);
574:     for (i=0;i<nep->nt;i++) {
575:       MatDestroy(&ctx->A[i]);
576:     }
577:     for (i=0;i<nep->npart;i++) {
578:       VecScatterDestroy(&ctx->scatter_id[i]);
579:     }
580:     PetscFree2(ctx->A,ctx->scatter_id);
581:   }
582:   if (fctx && nep->scheme==NEP_REFINE_SCHEME_SCHUR) {
583:     MatDestroy(&P);
584:     MatDestroy(&fctx->M1);
585:     PetscFree(fctx);
586:   }
587:   if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT) {
588:     MatDestroy(&Mt);
589:     VecDestroy(&dvv);
590:     VecDestroy(&rr);
591:     VecDestroy(&ctx->nv);
592:     if (nep->npart>1) {
593:       for (i=0;i<nep->nt;i++) { FNDestroy(&ctx->fn[i]); }
594:       PetscFree(ctx->fn);
595:     }
596:   }
597:   MatDestroy(&T);
598:   PetscFree(ctx);
599:   PetscLogEventEnd(NEP_Refine,nep,0,0,0);
600:   return(0);
601: }