Actual source code: mpi.c

petsc-3.14.1 2020-11-03
  1: /*
  2:       This provides a few of the MPI-uni functions that cannot be implemented
  3:     with C macros
  4: */
  5: #include <petscsys.h>
  6: #if !defined(MPIUNI_H)
  7: #error "Wrong mpi.h included! require mpi.h from MPIUNI"
  8: #endif

 10: #include <petsc/private/petscimpl.h>

 12: #if defined(PETSC_HAVE_CUDA)
 13:   #include <cuda_runtime.h>
 14: #endif

 16: #if defined(PETSC_HAVE_HIP)
 17:   #include <hip/hip_runtime.h>
 18: #endif

 20: #define MPI_SUCCESS 0
 21: #define MPI_FAILURE 1

 23: void *MPIUNI_TMP = NULL;

 25: /*
 26:        With MPI Uni there are exactly four distinct communicators:
 27:     MPI_COMM_SELF, MPI_COMM_WORLD, and an MPI_Comm_dup() of each of these (duplicates of duplicates return the same communicator)

 29:     MPI_COMM_SELF and MPI_COMM_WORLD are freed with MPI_Comm_free() in MPI_Finalize(), but in general with PETSc
 30:      the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).

 32: */
 33: #define MAX_ATTR 256
 34: #define MAX_COMM 128

 36: typedef struct {
 37:   void *attribute_val;
 38:   int  active;
 39: } MPI_Attr;

 41: typedef struct {
 42:   void                *extra_state;
 43:   MPI_Delete_function *del;
 44:   int                 active;  /* Is this keyval in use by some comm? */
 45: } MPI_Attr_keyval;

 47: static MPI_Attr_keyval attr_keyval[MAX_ATTR];
 48: static MPI_Attr        attr[MAX_COMM][MAX_ATTR];
 49: static int             comm_active[MAX_COMM];  /* Boolean array indicating which comms are in use */
 50: static int             mpi_tag_ub = 100000000;
 51: static int             num_attr = 1; /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
 52: static int             MaxComm  = 2; /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
 53: static void*           MPIUNIF_mpi_in_place = 0;

 55: #define CommIdx(comm)  ((comm)-1)  /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */
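
/*
   Communicator handles index the static tables attr[][] and comm_active[] through CommIdx(),
   following the handle values noted above: comm_null(0) has no slot, comm_self(1) -> index 0,
   comm_world(2) -> index 1, and communicators created by MPI_Comm_dup()/MPI_Comm_create()
   start at handle 3 -> index 2.
*/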

 57: #if defined(__cplusplus)
 58: extern "C" {
 59: #endif

 61: /*
 62:    To avoid problems with prototypes for the system memcpy(), it is duplicated here
 63: */
 64: int MPIUNI_Memcpy(void *dst,const void *src,int n)
 65: {
 66:   if (dst == MPI_IN_PLACE || dst == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
 67:   if (src == MPI_IN_PLACE || src == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
 68:   if (!n) return MPI_SUCCESS;

 70:   /* GPU-aware MPIUNI. Use synchronous copy per MPI semantics */
 71: #if defined(PETSC_HAVE_CUDA)
 72:   if (PetscCUDAInitialized) {cudaError_t cerr = cudaMemcpy(dst,src,n,cudaMemcpyDefault);if (cerr != cudaSuccess) return MPI_FAILURE;} else
 73: #elif defined(PETSC_HAVE_HIP)
 74:   if (PetscHIPInitialized)  {hipError_t  cerr = hipMemcpy(dst,src,n,hipMemcpyDefault);  if (cerr != hipSuccess)  return MPI_FAILURE;} else
 75: #endif
 76:   {memcpy(dst,src,n);}
 77:   return MPI_SUCCESS;
 78: }
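
/*
   Illustrative sketch (an assumption about the macros in MPIUNI's mpi.h, consistent with the
   Fortran wrappers near the end of this file): on the single MPIUNI rank a collective typically
   degenerates to a local copy through this routine, roughly

       MPIUNI_Memcpy(recvbuf, sendbuf, count*MPI_sizeof(datatype));

   Passing MPI_IN_PLACE (or the Fortran MPI_IN_PLACE captured in MPIUNIF_mpi_in_place) as either
   buffer makes the copy a no-op, as handled above.
*/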

 80: static int classcnt = 0;
 81: static int codecnt = 0;

 83: int MPI_Add_error_class(int *cl)
 84: {
 85:   *cl = classcnt++;
 86:   return MPI_SUCCESS;
 87: }

 89: int MPI_Add_error_code(int cl,int *co)
 90: {
 91:   if (cl >= classcnt) return MPI_FAILURE;
 92:   *co = codecnt++;
 93:   return MPI_SUCCESS;
 94: }

 96: int MPI_Type_get_envelope(MPI_Datatype datatype,int *num_integers,int *num_addresses,int *num_datatypes,int *combiner)
 97: {
 98:   int comb = datatype >> 28;
 99:   switch (comb) {
100:   case MPI_COMBINER_NAMED:
101:     *num_integers = 0;
102:     *num_addresses = 0;
103:     *num_datatypes = 0;
104:     *combiner = comb;
105:     break;
106:   case MPI_COMBINER_DUP:
107:     *num_integers = 0;
108:     *num_addresses = 0;
109:     *num_datatypes = 1;
110:     *combiner = comb;
111:     break;
112:   case MPI_COMBINER_CONTIGUOUS:
113:     *num_integers = 1;
114:     *num_addresses = 0;
115:     *num_datatypes = 1;
116:     *combiner = comb;
117:     break;
118:   default:
119:     return MPIUni_Abort(MPI_COMM_SELF,1);
120:   }
121:   return MPI_SUCCESS;
122: }

124: int MPI_Type_get_contents(MPI_Datatype datatype,int max_integers,int max_addresses,int max_datatypes,int *array_of_integers,MPI_Aint *array_of_addresses,MPI_Datatype *array_of_datatypes)
125: {
126:   int comb = datatype >> 28;
127:   switch (comb) {
128:   case MPI_COMBINER_NAMED:
129:     return MPIUni_Abort(MPI_COMM_SELF,1);
130:     break;
131:   case MPI_COMBINER_DUP:
132:     if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
133:     array_of_datatypes[0] = datatype & 0x0fffffff;
134:     break;
135:   case MPI_COMBINER_CONTIGUOUS:
136:     if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
137:     array_of_integers[0] = (datatype >> 8) & 0xfff; /* count */
138:     array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100;  /* basic named type (count=1) from which the contiguous type is derived */
139:     break;
140:   default:
141:     return MPIUni_Abort(MPI_COMM_SELF,1);
142:   }
143:   return MPI_SUCCESS;
144: }
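
/*
   Illustrative note (inferred from the two routines above; the encoding itself lives in MPIUNI's
   mpi.h and is not documented here): an MPIUNI datatype handle appears to pack its description
   into the integer value, roughly

       bits 28..31 : combiner (MPI_COMBINER_NAMED, _DUP, _CONTIGUOUS)
       bits  8..19 : element count for contiguous types
       remaining   : the underlying named base type (with count 1)

   which is why MPI_Type_get_contents() recovers the base type of a contiguous type with
   (datatype & 0x0ff000ff) | 0x100.
*/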

146: /*
147:    Used to set the built-in MPI_TAG_UB attribute
148: */
149: static int Keyval_setup(void)
150: {
151:   attr[CommIdx(MPI_COMM_WORLD)][0].active        = 1;
152:   attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
153:   attr[CommIdx(MPI_COMM_SELF)][0].active        = 1;
154:   attr[CommIdx(MPI_COMM_SELF)][0].attribute_val = &mpi_tag_ub;
155:   attr_keyval[0].active                          = 1;
156:   return MPI_SUCCESS;
157: }

159: int MPI_Comm_create_keyval(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
160: {
161:   int i,keyid;
162:   for (i=1; i<num_attr; i++) { /* the first attribute is always in use */
163:     if (!attr_keyval[i].active) {
164:       keyid = i;
165:       goto found;
166:     }
167:   }
168:   if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD,1);
169:   keyid = num_attr++;

171: found:
172:   attr_keyval[keyid].extra_state = extra_state;
173:   attr_keyval[keyid].del         = delete_fn;
174:   attr_keyval[keyid].active      = 1;
175:   *keyval                        = keyid;
176:   return MPI_SUCCESS;
177: }

179: int MPI_Comm_free_keyval(int *keyval)
180: {
181:   attr_keyval[*keyval].extra_state = 0;
182:   attr_keyval[*keyval].del         = 0;
183:   attr_keyval[*keyval].active      = 0;
184:   *keyval = 0;
185:   return MPI_SUCCESS;
186: }

188: int MPI_Comm_set_attr(MPI_Comm comm,int keyval,void *attribute_val)
189: {
190:   int idx = CommIdx(comm);
191:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
192:   attr[idx][keyval].active        = 1;
193:   attr[idx][keyval].attribute_val = attribute_val;
194:   return MPI_SUCCESS;
195: }

197: int MPI_Comm_delete_attr(MPI_Comm comm,int keyval)
198: {
199:   int idx = CommIdx(comm);
200:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
201:   if (attr[idx][keyval].active && attr_keyval[keyval].del) {
202:     void *save_attribute_val        = attr[idx][keyval].attribute_val;
203:     attr[idx][keyval].active        = 0;
204:     attr[idx][keyval].attribute_val = 0;
205:     (*(attr_keyval[keyval].del))(comm,keyval,save_attribute_val,attr_keyval[keyval].extra_state);
206:   }
207:   return MPI_SUCCESS;
208: }

210: int MPI_Comm_get_attr(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
211: {
212:   int idx = CommIdx(comm);
213:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
214:   if (!keyval) Keyval_setup();
215:   *flag                  = attr[idx][keyval].active;
216:   *(void**)attribute_val = attr[idx][keyval].attribute_val;
217:   return MPI_SUCCESS;
218: }
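
/*
   Illustrative usage sketch (standard MPI attribute caching against the tables above; assumes
   MPI_TAG_UB is keyval 0, as set up in Keyval_setup()):

       void *val;
       int   flag;
       MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_TAG_UB, &val, &flag);
       if (flag) printf("tag upper bound %d\n", *(int*)val);   // 100000000 here
*/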

220: int MPI_Comm_create(MPI_Comm comm,MPI_Group group,MPI_Comm *newcomm)
221: {
222:   int j;
223:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
224:   for (j=3; j<=MaxComm; j++) {
225:     if (!comm_active[CommIdx(j)]) {
226:       comm_active[CommIdx(j)] = 1;
227:       *newcomm = j;
228:       return MPI_SUCCESS;
229:     }
230:   }
231:   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
232:   *newcomm = ++MaxComm;
233:   comm_active[CommIdx(*newcomm)] = 1;
234:   return MPI_SUCCESS;
235: }

237: int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
238: {
239:   int j;
240:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
241:   for (j=3; j<=MaxComm; j++) {
242:     if (!comm_active[CommIdx(j)]) {
243:       comm_active[CommIdx(j)] = 1;
244:       *out = j;
245:       return MPI_SUCCESS;
246:     }
247:   }
248:   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
249:   *out = ++MaxComm;
250:   comm_active[CommIdx(*out)] = 1;
251:   return MPI_SUCCESS;
252: }

254: int MPI_Comm_free(MPI_Comm *comm)
255: {
256:   int i;
257:   int idx = CommIdx(*comm);

259:   if (*comm < 1 || *comm > MaxComm) return MPI_FAILURE;
260:   for (i=0; i<num_attr; i++) {
261:     if (attr[idx][i].active && attr_keyval[i].del) (*attr_keyval[i].del)(*comm,i,attr[idx][i].attribute_val,attr_keyval[i].extra_state);
262:     attr[idx][i].active        = 0;
263:     attr[idx][i].attribute_val = 0;
264:   }
265:   if (*comm >= 3) comm_active[idx] = 0;
266:   *comm = 0;
267:   return MPI_SUCCESS;
268: }

270: int MPI_Comm_size(MPI_Comm comm, int *size)
271: {
272:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
273:   *size=1;
274:   return MPI_SUCCESS;
275: }

277: int MPI_Comm_rank(MPI_Comm comm, int *rank)
278: {
279:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
280:   *rank=0;
281:   return MPI_SUCCESS;
282: }

284: int MPIUni_Abort(MPI_Comm comm,int errorcode)
285: {
286:   printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
287:   return MPI_FAILURE;
288: }

290: int MPI_Abort(MPI_Comm comm,int errorcode)
291: {
292:   abort();
293:   return MPI_SUCCESS;
294: }

296: /* --------------------------------------------------------------------------*/

298: static int MPI_was_initialized = 0;
299: static int MPI_was_finalized   = 0;

301: int MPI_Init(int *argc, char ***argv)
302: {
303:   if (MPI_was_initialized) return MPI_FAILURE;
304:   if (MPI_was_finalized) return MPI_FAILURE; /* MPI standard: once MPI_FINALIZE returns, no MPI routine (not even MPI_INIT) may be called, except ... */
305:   MPI_was_initialized = 1;
306:   return MPI_SUCCESS;
307: }

309: int MPI_Finalize(void)
310: {
311:   MPI_Comm comm;
312:   if (MPI_was_finalized) return MPI_FAILURE;
313:   if (!MPI_was_initialized) return MPI_FAILURE;
314:   comm = MPI_COMM_WORLD;
315:   MPI_Comm_free(&comm);
316:   comm = MPI_COMM_SELF;
317:   MPI_Comm_free(&comm);
318: #if defined(PETSC_USE_DEBUG)
319:   {
320:     int i;
321:     for (i=3; i<=MaxComm; i++) {
322:       if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n", i);
323:     }
324:   }
325: #endif
326:   /* reset counters */
327:   MaxComm  = 2;
328:   num_attr = 1;
329:   MPI_was_finalized = 1;
330:   return MPI_SUCCESS;
331: }

333: int MPI_Initialized(int *flag)
334: {
335:   *flag = MPI_was_initialized;
336:   return MPI_SUCCESS;
337: }

339: int MPI_Finalized(int *flag)
340: {
341:   *flag = MPI_was_finalized;
342:   return MPI_SUCCESS;
343: }
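
/*
   Illustrative sketch of the lifecycle enforced above:

       int flag;
       MPI_Init(NULL, NULL);      // MPI_SUCCESS on the first call
       MPI_Initialized(&flag);    // flag == 1
       MPI_Finalize();            // frees MPI_COMM_SELF / MPI_COMM_WORLD attributes
       MPI_Finalized(&flag);      // flag == 1
       MPI_Init(NULL, NULL);      // MPI_FAILURE: MPIUNI cannot be re-initialized
*/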

345: /* -------------------     Fortran versions of several routines ------------------ */

347: #if defined(PETSC_HAVE_FORTRAN_CAPS)
348: #define mpiunisetmoduleblock_          MPIUNISETMODULEBLOCK
349: #define mpiunisetfortranbasepointers_  MPIUNISETFORTRANBASEPOINTERS
350: #define petsc_mpi_init_                PETSC_MPI_INIT
351: #define petsc_mpi_finalize_            PETSC_MPI_FINALIZE
352: #define petsc_mpi_comm_size_           PETSC_MPI_COMM_SIZE
353: #define petsc_mpi_comm_rank_           PETSC_MPI_COMM_RANK
354: #define petsc_mpi_abort_               PETSC_MPI_ABORT
355: #define petsc_mpi_reduce_              PETSC_MPI_REDUCE
356: #define petsc_mpi_allreduce_           PETSC_MPI_ALLREDUCE
357: #define petsc_mpi_barrier_             PETSC_MPI_BARRIER
358: #define petsc_mpi_bcast_               PETSC_MPI_BCAST
359: #define petsc_mpi_gather_              PETSC_MPI_GATHER
360: #define petsc_mpi_allgather_           PETSC_MPI_ALLGATHER
361: #define petsc_mpi_comm_split_          PETSC_MPI_COMM_SPLIT
362: #define petsc_mpi_scan_                PETSC_MPI_SCAN
363: #define petsc_mpi_send_                PETSC_MPI_SEND
364: #define petsc_mpi_recv_                PETSC_MPI_RECV
365: #define petsc_mpi_reduce_scatter_      PETSC_MPI_REDUCE_SCATTER
366: #define petsc_mpi_irecv_               PETSC_MPI_IRECV
367: #define petsc_mpi_isend_               PETSC_MPI_ISEND
368: #define petsc_mpi_sendrecv_            PETSC_MPI_SENDRECV
369: #define petsc_mpi_test_                PETSC_MPI_TEST
370: #define petsc_mpi_waitall_             PETSC_MPI_WAITALL
371: #define petsc_mpi_waitany_             PETSC_MPI_WAITANY
372: #define petsc_mpi_allgatherv_          PETSC_MPI_ALLGATHERV
373: #define petsc_mpi_alltoallv_           PETSC_MPI_ALLTOALLV
374: #define petsc_mpi_comm_create_         PETSC_MPI_COMM_CREATE
375: #define petsc_mpi_address_             PETSC_MPI_ADDRESS
376: #define petsc_mpi_pack_                PETSC_MPI_PACK
377: #define petsc_mpi_unpack_              PETSC_MPI_UNPACK
378: #define petsc_mpi_pack_size_           PETSC_MPI_PACK_SIZE
379: #define petsc_mpi_type_struct_         PETSC_MPI_TYPE_STRUCT
380: #define petsc_mpi_type_commit_         PETSC_MPI_TYPE_COMMIT
381: #define petsc_mpi_wtime_               PETSC_MPI_WTIME
382: #define petsc_mpi_cancel_              PETSC_MPI_CANCEL
383: #define petsc_mpi_comm_dup_            PETSC_MPI_COMM_DUP
384: #define petsc_mpi_comm_free_           PETSC_MPI_COMM_FREE
385: #define petsc_mpi_get_count_           PETSC_MPI_GET_COUNT
386: #define petsc_mpi_get_processor_name_  PETSC_MPI_GET_PROCESSOR_NAME
387: #define petsc_mpi_initialized_         PETSC_MPI_INITIALIZED
388: #define petsc_mpi_iprobe_              PETSC_MPI_IPROBE
389: #define petsc_mpi_probe_               PETSC_MPI_PROBE
390: #define petsc_mpi_request_free_        PETSC_MPI_REQUEST_FREE
391: #define petsc_mpi_ssend_               PETSC_MPI_SSEND
392: #define petsc_mpi_wait_                PETSC_MPI_WAIT
393: #define petsc_mpi_comm_group_          PETSC_MPI_COMM_GROUP
394: #define petsc_mpi_exscan_              PETSC_MPI_EXSCAN
395: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
396: #define mpiunisetmoduleblock_          mpiunisetmoduleblock
397: #define mpiunisetfortranbasepointers_  mpiunisetfortranbasepointers
398: #define petsc_mpi_init_                petsc_mpi_init
399: #define petsc_mpi_finalize_            petsc_mpi_finalize
400: #define petsc_mpi_comm_size_           petsc_mpi_comm_size
401: #define petsc_mpi_comm_rank_           petsc_mpi_comm_rank
402: #define petsc_mpi_abort_               petsc_mpi_abort
403: #define petsc_mpi_reduce_              petsc_mpi_reduce
404: #define petsc_mpi_allreduce_           petsc_mpi_allreduce
405: #define petsc_mpi_barrier_             petsc_mpi_barrier
406: #define petsc_mpi_bcast_               petsc_mpi_bcast
407: #define petsc_mpi_gather_              petsc_mpi_gather
408: #define petsc_mpi_allgather_           petsc_mpi_allgather
409: #define petsc_mpi_comm_split_          petsc_mpi_comm_split
410: #define petsc_mpi_scan_                petsc_mpi_scan
411: #define petsc_mpi_send_                petsc_mpi_send
412: #define petsc_mpi_recv_                petsc_mpi_recv
413: #define petsc_mpi_reduce_scatter_      petsc_mpi_reduce_scatter
414: #define petsc_mpi_irecv_               petsc_mpi_irecv
415: #define petsc_mpi_isend_               petsc_mpi_isend
416: #define petsc_mpi_sendrecv_            petsc_mpi_sendrecv
417: #define petsc_mpi_test_                petsc_mpi_test
418: #define petsc_mpi_waitall_             petsc_mpi_waitall
419: #define petsc_mpi_waitany_             petsc_mpi_waitany
420: #define petsc_mpi_allgatherv_          petsc_mpi_allgatherv
421: #define petsc_mpi_alltoallv_           petsc_mpi_alltoallv
422: #define petsc_mpi_comm_create_         petsc_mpi_comm_create
423: #define petsc_mpi_address_             petsc_mpi_address
424: #define petsc_mpi_pack_                petsc_mpi_pack
425: #define petsc_mpi_unpack_              petsc_mpi_unpack
426: #define petsc_mpi_pack_size_           petsc_mpi_pack_size
427: #define petsc_mpi_type_struct_         petsc_mpi_type_struct
428: #define petsc_mpi_type_commit_         petsc_mpi_type_commit
429: #define petsc_mpi_wtime_               petsc_mpi_wtime
430: #define petsc_mpi_cancel_              petsc_mpi_cancel
431: #define petsc_mpi_comm_dup_            petsc_mpi_comm_dup
432: #define petsc_mpi_comm_free_           petsc_mpi_comm_free
433: #define petsc_mpi_get_count_           petsc_mpi_get_count
434: #define petsc_mpi_get_processor_name_  petsc_mpi_get_processor_name
435: #define petsc_mpi_initialized_         petsc_mpi_initialized
436: #define petsc_mpi_iprobe_              petsc_mpi_iprobe
437: #define petsc_mpi_probe_               petsc_mpi_probe
438: #define petsc_mpi_request_free_        petsc_mpi_request_free
439: #define petsc_mpi_ssend_               petsc_mpi_ssend
440: #define petsc_mpi_wait_                petsc_mpi_wait
441: #define petsc_mpi_comm_group_          petsc_mpi_comm_group
442: #define petsc_mpi_exscan_              petsc_mpi_exscan
443: #endif

445: #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
446: #define petsc_mpi_init_                petsc_mpi_init__
447: #define petsc_mpi_finalize_            petsc_mpi_finalize__
448: #define petsc_mpi_comm_size_           petsc_mpi_comm_size__
449: #define petsc_mpi_comm_rank_           petsc_mpi_comm_rank__
450: #define petsc_mpi_abort_               petsc_mpi_abort__
451: #define petsc_mpi_reduce_              petsc_mpi_reduce__
452: #define petsc_mpi_allreduce_           petsc_mpi_allreduce__
453: #define petsc_mpi_barrier_             petsc_mpi_barrier__
454: #define petsc_mpi_bcast_               petsc_mpi_bcast__
455: #define petsc_mpi_gather_              petsc_mpi_gather__
456: #define petsc_mpi_allgather_           petsc_mpi_allgather__
457: #define petsc_mpi_comm_split_          petsc_mpi_comm_split__
458: #define petsc_mpi_scan_                petsc_mpi_scan__
459: #define petsc_mpi_send_                petsc_mpi_send__
460: #define petsc_mpi_recv_                petsc_mpi_recv__
461: #define petsc_mpi_reduce_scatter_      petsc_mpi_reduce_scatter__
462: #define petsc_mpi_irecv_               petsc_mpi_irecv__
463: #define petsc_mpi_isend_               petsc_mpi_isend__
464: #define petsc_mpi_sendrecv_            petsc_mpi_sendrecv__
465: #define petsc_mpi_test_                petsc_mpi_test__
466: #define petsc_mpi_waitall_             petsc_mpi_waitall__
467: #define petsc_mpi_waitany_             petsc_mpi_waitany__
468: #define petsc_mpi_allgatherv_          petsc_mpi_allgatherv__
469: #define petsc_mpi_alltoallv_           petsc_mpi_alltoallv__
470: #define petsc_mpi_comm_create_         petsc_mpi_comm_create__
471: #define petsc_mpi_address_             petsc_mpi_address__
472: #define petsc_mpi_pack_                petsc_mpi_pack__
473: #define petsc_mpi_unpack_              petsc_mpi_unpack__
474: #define petsc_mpi_pack_size_           petsc_mpi_pack_size__
475: #define petsc_mpi_type_struct_         petsc_mpi_type_struct__
476: #define petsc_mpi_type_commit_         petsc_mpi_type_commit__
477: #define petsc_mpi_wtime_               petsc_mpi_wtime__
478: #define petsc_mpi_cancel_              petsc_mpi_cancel__
479: #define petsc_mpi_comm_dup_            petsc_mpi_comm_dup__
480: #define petsc_mpi_comm_free_           petsc_mpi_comm_free__
481: #define petsc_mpi_get_count_           petsc_mpi_get_count__
482: #define petsc_mpi_get_processor_name_  petsc_mpi_get_processor_name__
483: #define petsc_mpi_initialized_         petsc_mpi_initialized__
484: #define petsc_mpi_iprobe_              petsc_mpi_iprobe__
485: #define petsc_mpi_probe_               petsc_mpi_probe__
486: #define petsc_mpi_request_free_        petsc_mpi_request_free__
487: #define petsc_mpi_ssend_               petsc_mpi_ssend__
488: #define petsc_mpi_wait_                petsc_mpi_wait__
489: #define petsc_mpi_comm_group_          petsc_mpi_comm_group__
490: #define petsc_mpi_exscan_              petsc_mpi_exscan__
491: #endif
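
/*
   Illustrative note: the macro blocks above select the Fortran symbol-mangling convention
   detected at configure time, so the single set of wrapper definitions below satisfies whichever
   name the Fortran compiler references, e.g. petsc_mpi_comm_rank_ may become
   PETSC_MPI_COMM_RANK, petsc_mpi_comm_rank, or petsc_mpi_comm_rank__.  (It is assumed that
   PETSc's MPIUNI Fortran stubs map MPI calls onto these petsc_mpi_* entry points; that mapping
   is not shown in this file.)
*/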

493: /* Do not build the Fortran interface if MPI namespace collision is to be avoided */
494: #if defined(PETSC_HAVE_FORTRAN)

496: PETSC_EXTERN void mpiunisetmoduleblock_(void);

498: PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place)
499: {
500:   MPIUNIF_mpi_in_place   = f_mpi_in_place;
501: }
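
/*
   Illustrative note: Fortran has its own MPI_IN_PLACE constant whose address the C code cannot
   know at compile time, so (an assumption about the Fortran module setup) it is handed over once
   through this routine; MPIUNI_Memcpy() then also treats MPIUNIF_mpi_in_place as "in place" and
   skips the copy.
*/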

503: PETSC_EXTERN void petsc_mpi_init_(int *ierr)
504: {
505:   mpiunisetmoduleblock_();
506:   *ierr = MPI_Init((int*)0, (char***)0);
507: }

509: PETSC_EXTERN void petsc_mpi_finalize_(int *ierr)
510: {
511:   *ierr = MPI_Finalize();
512: }

514: PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
515: {
516:   *size = 1;
517:   *ierr = 0;
518: }

520: PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
521: {
522:   *rank = 0;
523:   *ierr = MPI_SUCCESS;
524: }

526: PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
527: {
528:   *newcomm = *comm;
529:   *ierr = MPI_SUCCESS;
530: }

532: PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
533: {
534:   abort();
535:   *ierr = MPI_SUCCESS;
536: }

538: PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
539: {
540:   *ierr = MPI_Reduce(sendbuf,recvbuf,*count,*datatype,*op,*root,*comm);
541: }

543: PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
544: {
545:   *ierr = MPI_Allreduce(sendbuf,recvbuf,*count,*datatype,*op,*comm);
546: }

548: PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm,int *ierr)
549: {
550:   *ierr = MPI_SUCCESS;
551: }

553: PETSC_EXTERN void petsc_mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
554: {
555:   *ierr = MPI_SUCCESS;
556: }

558: PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root,int *comm,int *ierr)
559: {
560:   *ierr = MPI_Gather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*root,*comm);
561: }

563: PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype,int *comm,int *ierr)
564: {
565:   *ierr = MPI_Allgather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*comm);
566: }

568: PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
569: {
570:   *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPI_sizeof(*datatype));
571: }

573: PETSC_EXTERN void petsc_mpi_send_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
574: {
575:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
576: }

578: PETSC_EXTERN void petsc_mpi_recv_(void *buf,int *count,int *datatype,int *source,int *tag,int *comm,int *status,int *ierr)
579: {
580:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
581: }

583: PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf,void *recvbuf,int *recvcounts,int *datatype,int *op,int *comm,int *ierr)
584: {
585:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
586: }

588: PETSC_EXTERN void petsc_mpi_irecv_(void *buf,int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr)
589: {
590:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
591: }

593: PETSC_EXTERN void petsc_mpi_isend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *request, int *ierr)
594: {
595:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
596: }

598: PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf,int *sendcount,int *sendtype,int *dest,int *sendtag,void *recvbuf,int *recvcount,int *recvtype,int *source,int *recvtag,int *comm,int *status,int *ierr)
599: {
600:   *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPI_sizeof(*sendtype));
601: }

603: PETSC_EXTERN void petsc_mpi_test_(int *request,int *flag,int *status,int *ierr)
604: {
605:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
606: }

608: PETSC_EXTERN void petsc_mpi_waitall_(int *count,int *array_of_requests,int *array_of_statuses,int *ierr)
609: {
610:   *ierr = MPI_SUCCESS;
611: }

613: PETSC_EXTERN void petsc_mpi_waitany_(int *count,int *array_of_requests,int * index, int *status,int *ierr)
614: {
615:   *ierr = MPI_SUCCESS;
616: }

618: PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf,int *sendcount,int *sendtype,void *recvbuf,int *recvcounts,int *displs,int *recvtype,int *comm,int *ierr)
619: {
620:   *ierr = MPI_Allgatherv(sendbuf,*sendcount,*sendtype,recvbuf,recvcounts,displs,*recvtype,*comm);
621: }

623: PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf,int *sendcounts,int *sdispls,int *sendtype,void *recvbuf,int *recvcounts,int *rdispls,int *recvtype,int *comm,int *ierr)
624: {
625:   *ierr = MPI_Alltoallv(sendbuf,sendcounts,sdispls,*sendtype,recvbuf,recvcounts,rdispls,*recvtype,*comm);
626: }

628: PETSC_EXTERN void petsc_mpi_comm_create_(int *comm,int *group,int *newcomm,int *ierr)
629: {
630:   *newcomm =  *comm;
631:   *ierr = MPI_SUCCESS;
632: }

634: PETSC_EXTERN void petsc_mpi_address_(void *location,MPI_Aint *address,int *ierr)
635: {
636:   *address =  (MPI_Aint) ((char *)location);
637:   *ierr = MPI_SUCCESS;
638: }

640: PETSC_EXTERN void petsc_mpi_pack_(void *inbuf,int *incount,int *datatype,void *outbuf,int *outsize,int *position,int *comm,int *ierr)
641: {
642:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
643: }

645: PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf,int *insize,int *position,void *outbuf,int *outcount,int *datatype,int *comm,int *ierr)
646: {
647:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
648: }

650: PETSC_EXTERN void petsc_mpi_pack_size_(int *incount,int *datatype,int *comm,int *size,int *ierr)
651: {
652:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
653: }

655: PETSC_EXTERN void petsc_mpi_type_struct_(int *count,int *array_of_blocklengths,int *array_of_displacements,int *array_of_types,int *newtype,int *ierr)
656: {
657:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
658: }

660: PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype,int *ierr)
661: {
662:   *ierr = MPI_SUCCESS;
663: }

665: double petsc_mpi_wtime_(void)
666: {
667:   return 0.0;
668: }

670: PETSC_EXTERN void petsc_mpi_cancel_(int *request,int *ierr)
671: {
672:   *ierr = MPI_SUCCESS;
673: }

675: PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm,int *out,int *ierr)
676: {
677:   *out  = *comm;
678:   *ierr = MPI_SUCCESS;
679: }

681: PETSC_EXTERN void petsc_mpi_comm_free_(int *comm,int *ierr)
682: {
683:   *ierr = MPI_SUCCESS;
684: }

686: PETSC_EXTERN void petsc_mpi_get_count_(int *status,int *datatype,int *count,int *ierr)
687: {
688:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
689: }

691: PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name,int *result_len,int *ierr,PETSC_FORTRAN_CHARLEN_T len)
692: {
693:   MPIUNI_Memcpy(name,"localhost",9*sizeof(char));
694:   *result_len = 9;
695:   *ierr = MPI_SUCCESS;
696: }

698: PETSC_EXTERN void petsc_mpi_initialized_(int *flag,int *ierr)
699: {
700:   *flag = MPI_was_initialized;
701:   *ierr = MPI_SUCCESS;
702: }

704: PETSC_EXTERN void petsc_mpi_iprobe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
705: {
706:   *ierr = MPI_SUCCESS;
707: }

709: PETSC_EXTERN void petsc_mpi_probe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
710: {
711:   *ierr = MPI_SUCCESS;
712: }

714: PETSC_EXTERN void petsc_mpi_request_free_(int *request,int *ierr)
715: {
716:   *ierr = MPI_SUCCESS;
717: }

719: PETSC_EXTERN void petsc_mpi_ssend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
720: {
721:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
722: }

724: PETSC_EXTERN void petsc_mpi_wait_(int *request,int *status,int *ierr)
725: {
726:   *ierr = MPI_SUCCESS;
727: }

729: PETSC_EXTERN void petsc_mpi_comm_group_(int *comm,int *group,int *ierr)
730: {
731:   *ierr = MPI_SUCCESS;
732: }

734: PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
735: {
736:   *ierr = MPI_SUCCESS;
737: }

739: #endif /* PETSC_HAVE_FORTRAN */

741: #if defined(__cplusplus)
742: }
743: #endif