Actual source code: mpi.h
petsc-3.10.2 2018-10-09
1: /*
2: This is a special set of bindings for uni-processor use of MPI by the PETSc library.
4: NOT ALL THE MPI CALLS ARE IMPLEMENTED CORRECTLY! Only those needed by PETSc are.
6: For example,
7: * Does not implement send to self.
8: * Does not implement attributes correctly.
9: */
11: /*
12: The following info is a response to one of the petsc-maint questions
13: regarding MPIUNI.
15: MPIUNI was developed with the aim of getting PETSc compiled and
16: usable in the absence of a full MPI implementation. With it, we
17: were able to provide PETSc on Windows and Windows64 even before any MPI
18: implementation was available on these platforms. [Or with certain
19: compilers - like Borland - that do not have a usable MPI
20: implementation.]
22: However, providing a sequential, standards-compliant MPI
23: implementation is *not* the goal of MPIUNI. The development strategy
24: was to make just enough changes so that the PETSc sources and examples
25: compile without errors and run in uni-processor mode. This is
26: why the individual functions are not documented.
28: PETSc usage of MPIUNI is primarily from C. However, a minimal Fortran
29: interface is also provided - to get the PETSc Fortran examples with a
30: few MPI calls working.
32: One of the optimizations in MPIUNI is to avoid function call
33: overhead when possible. Hence most of the C functions are
34: implemented as macros. The function call overhead cannot be avoided
35: with Fortran usage, however.
37: Most PETSc objects have both sequential and parallel
38: implementations, which are separate. For example, there are two
39: sparse matrix storage formats - SeqAIJ and MPIAIJ. Some MPI
40: routines are used in the Seq part, but most of them are used in the
41: MPI part. The send/receive calls are found mostly in the MPI
42: part.
44: When MPIUNI is used, only the Seq versions of the PETSc objects are
45: used, even though the MPI variants of the objects are compiled. Since
46: there are no send/receive calls in the Seq variants, PETSc works fine
47: with MPIUNI in sequential mode.
49: The reason some send/receive functions are defined to abort() is to
50: detect sections of code that use send/receive functions and get
51: executed in sequential mode (which should not happen in the case of
52: PETSc).
54: A proper implementation of send/receive would involve writing a
55: function for each of them. Inside each of these functions, we would
56: have to check whether the send is to self or the receive is from self,
57: and then do the buffering accordingly (until the receive is called) -
58: or, if a nonblocking receive is called, do a copy, etc. Handling
59: the buffering aspects would be complicated enough that, at that
60: point, a proper implementation of MPI might as well be used. This is
61: the reason send-to-self is not implemented in MPIUNI, and never
62: will be.
64: Proper implementations of MPI [for example MPICH & OpenMPI] are
65: available for most machines. When these packages are available, it is
66: generally preferable to use one of them instead of MPIUNI - even if
67: the user is using PETSc sequentially.
69: - MPIUNI does not support all MPI functions [or functionality].
70: Hence it might not work with external packages or user code that
71: might have MPI calls in it.
73: - MPIUNI is not a standards-compliant implementation for np=1.
74: For example, if the user code has a send/recv to self, it will
75: abort. [There are similar issues with a number of other MPI features.]
76: MPICH & OpenMPI, however, are correct implementations of the MPI
77: standard for np=1.
79: - When user code uses multiple MPI-based packages that have their
80: own *internal* stubs equivalent to MPIUNI - in sequential mode -
81: these multiple implementations of MPI for np=1 invariably conflict
82: with each other. The correct thing to do is to make all such
83: packages use the *same* MPI implementation for np=1. MPICH/OpenMPI
84: satisfy this requirement [and hence are the correct choice].
86: - Using MPICH/OpenMPI sequentially should have minimal
87: disadvantages. [For example, these binaries can be run without
88: mpirun/mpiexec as ./executable, without requiring any extra
89: configuration for ssh/rsh/daemons etc.] This should not be a
90: reason to avoid these packages for sequential use.
92: Instructions for building standalone MPIUNI [for example, linux/gcc+gfortran]:
93: - extract include/mpiuni/mpi.h, mpif.h, src/sys/mpiuni/mpi.c from PETSc
94: - remove the reference to petscconf.h from mpi.h
95: - gcc -c mpi.c -DPETSC_HAVE_STDLIB_H -DPETSC_HAVE_FORTRAN_UNDERSCORE
96: - ar cr libmpiuni.a mpi.o
98: */
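/*
    Example usage [a sketch, assuming the standalone build described above or a
    regular PETSc build; not itself part of this header]: a minimal sequential
    program using only calls that MPIUNI supports. With MPIUNI the rank is always
    0, the size is always 1, and the MPI_Allreduce() below expands to a single
    MPIUNI_Memcpy() of count*MPI_sizeof(datatype) bytes; the same source also
    builds unchanged against a real MPI implementation.

      #include <mpi.h>
      #include <stdio.h>

      int main(int argc,char **argv)
      {
        int rank,size,local = 42,global = 0;
        MPI_Init(&argc,&argv);
        MPI_Comm_rank(MPI_COMM_WORLD,&rank);
        MPI_Comm_size(MPI_COMM_WORLD,&size);
        MPI_Allreduce(&local,&global,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
        printf("rank %d of %d, sum %d\n",rank,size,global);
        MPI_Finalize();
        return 0;
      }

    Calls such as MPI_Send()/MPI_Recv() must be avoided: MPIUNI maps them to
    MPIUni_Abort(), since send-to-self is deliberately not implemented.
*/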
#if !defined(__MPIUNI_H)
#define __MPIUNI_H

103: /* Required by abort() in mpi.c & for win64 */
104: #include <petscconf.h>
105: #include <stddef.h>
107: /* This is reproduced from petscsys.h so that mpi.h can be used standalone without first including petscsys.h */
108: #if defined(_WIN32) && defined(PETSC_USE_SHARED_LIBRARIES)
109: # define MPIUni_ __declspec(dllexport)
110: # define MPIUni_PETSC_DLLIMPORT __declspec(dllimport)
111: #elif defined(PETSC_USE_VISIBILITY_CXX) && defined(__cplusplus)
112: # define MPIUni_ __attribute__((visibility ("default")))
113: # define MPIUni_PETSC_DLLIMPORT __attribute__((visibility ("default")))
114: #elif defined(PETSC_USE_VISIBILITY_C) && !defined(__cplusplus)
115: # define MPIUni_ __attribute__((visibility ("default")))
116: # define MPIUni_PETSC_DLLIMPORT __attribute__((visibility ("default")))
117: #else
118: # define MPIUni_
119: # define MPIUni_PETSC_DLLIMPORT
120: #endif
122: #if defined(petsc_EXPORTS)
123: # define MPIUni_PETSC_VISIBILITY_PUBLIC MPIUni_
124: #else /* Win32 users need this to import symbols from petsc.dll */
125: # define MPIUni_PETSC_VISIBILITY_PUBLIC MPIUni_PETSC_DLLIMPORT
126: #endif
128: #if defined(__cplusplus)
129: #define MPIUni_PETSC_EXTERN extern "C" MPIUni_PETSC_VISIBILITY_PUBLIC
130: #else
131: #define MPIUni_PETSC_EXTERN extern MPIUni_PETSC_VISIBILITY_PUBLIC
132: #endif
134: #if defined(__cplusplus)
135: extern "C" {
136: #endif
138: /* MPI_Aint has to be a signed integral type large enough to hold a pointer */
139: #if PETSC_SIZEOF_INT == PETSC_SIZEOF_VOID_P
140: typedef int MPI_Aint;
141: #elif PETSC_SIZEOF_LONG == PETSC_SIZEOF_VOID_P
142: typedef long MPI_Aint;
143: #else
144: typedef ptrdiff_t MPI_Aint;
145: #endif
147: /* the old 32-bit MS compiler does not support long long */
148: #if defined(PETSC_SIZEOF_LONG_LONG)
149: typedef long long MPIUNI_INT64;
150: typedef unsigned long long MPIUNI_UINT64;
151: #elif defined(PETSC_HAVE___INT64)
152: typedef _int64 MPIUNI_INT64;
153: typedef unsigned _int64 MPIUNI_UINT64;
154: #else
155: #error "cannot determine MPIUNI_INT64, MPIUNI_UINT64 types"
156: #endif
158: /*
160: MPIUNI_ARG is used in the macros below only to stop various C/C++ compilers
161: from generating warning messages about unused variables while compiling PETSc.
162: */
163: MPIUni_PETSC_EXTERN void *MPIUNI_TMP;
164: #define MPIUNI_ARG(arg) (MPIUNI_TMP = (void *)(MPI_Aint) (arg))
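/*
    For example [expansion shown for illustration only], the do-nothing macro
    defined later in this file,

      #define MPI_Barrier(comm)  (MPIUNI_ARG(comm), MPI_SUCCESS)

    turns a call site such as

      MPI_Comm comm = MPI_COMM_WORLD;
      MPI_Barrier(comm);

    into

      (MPIUNI_TMP = (void *)(MPI_Aint) (comm), MPI_SUCCESS);

    so comm is referenced (no unused-variable warning) and the expression still
    evaluates to MPI_SUCCESS.
*/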
166: #define MPI_IDENT 0
167: #define MPI_CONGRUENT 1
168: #define MPI_SIMILAR 2
169: #define MPI_UNEQUAL 3
171: #define MPI_BOTTOM ((void *) 0)
172: #define MPI_IN_PLACE ((void *)-1)
174: #define MPI_PROC_NULL (-1)
175: #define MPI_ANY_SOURCE (-2)
176: #define MPI_ANY_TAG (-1)
177: #define MPI_UNDEFINED (-32766)
179: #define MPI_SUCCESS 0
180: #define MPI_ERR_OTHER 17
181: #define MPI_ERR_UNKNOWN 18
182: #define MPI_ERR_INTERN 21
184: #define MPI_KEYVAL_INVALID 0
185: #define MPI_TAG_UB 0
187: #define MPI_MAX_PROCESSOR_NAME 1024
188: #define MPI_MAX_ERROR_STRING 2056
190: typedef int MPI_Comm;
191: #define MPI_COMM_NULL 0
192: #define MPI_COMM_SELF 1
193: #define MPI_COMM_WORLD 2
194: #define MPI_COMM_TYPE_SHARED 1
196: typedef int MPI_Info;
197: #define MPI_INFO_NULL 0
199: typedef struct {int MPI_SOURCE,MPI_TAG,MPI_ERROR;} MPI_Status;
200: #define MPI_STATUS_IGNORE (MPI_Status *)0
201: #define MPI_STATUSES_IGNORE (MPI_Status *)0
203: /* 32-bit packing scheme: [combiner:4 | type-index:8 | count:12 | base-bytes:8] */
204: /* Any changes here must also be reflected in mpif.h */
205: typedef int MPI_Datatype;
206: #define MPI_DATATYPE_NULL 0
207: #define MPI_PACKED 0
209: #define MPI_FLOAT (1 << 20 | 1 << 8 | (int)sizeof(float))
210: #define MPI_DOUBLE (1 << 20 | 1 << 8 | (int)sizeof(double))
211: #define MPI_LONG_DOUBLE (1 << 20 | 1 << 8 | (int)sizeof(long double))
213: #define MPI_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
214: #define MPI_C_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
215: #define MPI_C_FLOAT_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
216: #define MPI_DOUBLE_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(double))
217: #define MPI_C_DOUBLE_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(double))
219: #define MPI_CHAR (3 << 20 | 1 << 8 | (int)sizeof(char))
220: #define MPI_BYTE (3 << 20 | 1 << 8 | (int)sizeof(char))
221: #define MPI_SIGNED_CHAR (3 << 20 | 1 << 8 | (int)sizeof(signed char))
222: #define MPI_UNSIGNED_CHAR (3 << 20 | 1 << 8 | (int)sizeof(unsigned char))
224: #define MPI_SHORT (4 << 20 | 1 << 8 | (int)sizeof(short))
225: #define MPI_INT (4 << 20 | 1 << 8 | (int)sizeof(int))
226: #define MPI_LONG (4 << 20 | 1 << 8 | (int)sizeof(long))
227: #define MPI_LONG_LONG (4 << 20 | 1 << 8 | (int)sizeof(MPIUNI_INT64))
228: #define MPI_LONG_LONG_INT MPI_LONG_LONG
229: #define MPI_INTEGER8 MPI_LONG_LONG
231: #define MPI_UNSIGNED_SHORT (5 << 20 | 1 << 8 | (int)sizeof(unsigned short))
232: #define MPI_UNSIGNED (5 << 20 | 1 << 8 | (int)sizeof(unsigned))
233: #define MPI_UNSIGNED_LONG (5 << 20 | 1 << 8 | (int)sizeof(unsigned long))
234: #define MPI_UNSIGNED_LONG_LONG (5 << 20 | 1 << 8 | (int)sizeof(MPIUNI_UINT64))
236: #define MPI_FLOAT_INT (10 << 20 | 1 << 8 | (int)(sizeof(float) + sizeof(int)))
237: #define MPI_DOUBLE_INT (11 << 20 | 1 << 8 | (int)(sizeof(double) + sizeof(int)))
238: #define MPI_LONG_INT (12 << 20 | 1 << 8 | (int)(sizeof(long) + sizeof(int)))
239: #define MPI_SHORT_INT (13 << 20 | 1 << 8 | (int)(sizeof(short) + sizeof(int)))
240: #define MPI_2INT (14 << 20 | 1 << 8 | (int)(2*sizeof(int)))
242: /* Fortran datatypes; Jed Brown says they should be defined here */
243: #define MPI_INTEGER MPI_INT
244: #define MPI_DOUBLE_PRECISION MPI_DOUBLE
245: #define MPI_COMPLEX16 MPI_C_DOUBLE_COMPLEX
247: #define MPI_ORDER_C 0
248: #define MPI_ORDER_FORTRAN 1
250: #define MPI_sizeof_default(datatype) ((((datatype) >> 8) & 0xfff) * ((datatype) & 0xff))
251: #if defined(PETSC_USE_REAL___FP16)
252: MPIUni_PETSC_EXTERN MPI_Datatype MPIU___FP16;
253: #define MPI_sizeof(datatype) ((datatype == MPIU___FP16) ? (int)(2*sizeof(char)) : MPI_sizeof_default(datatype))
254: #elif defined(PETSC_USE_REAL___FLOAT128)
255: MPIUni_PETSC_EXTERN MPI_Datatype MPIU___FLOAT128;
256: #define MPI_sizeof(datatype) ((datatype == MPIU___FLOAT128) ? (int)(2*sizeof(double)) : MPI_sizeof_default(datatype))
257: #else
258: #define MPI_sizeof(datatype) (MPI_sizeof_default(datatype))
259: #endif
261: MPIUni_PETSC_EXTERN int MPIUNI_Memcpy(void*,const void*,int);
263: typedef int MPI_Request;
264: #define MPI_REQUEST_NULL 0
266: typedef int MPI_Group;
267: #define MPI_GROUP_NULL 0
268: #define MPI_GROUP_EMPTY 0
270: typedef int MPI_Op;
271: #define MPI_OP_NULL 0
272: #define MPI_SUM 1
273: #define MPI_MAX 2
274: #define MPI_MIN 3
275: #define MPI_REPLACE 4
276: #define MPI_PROD 5
277: #define MPI_LAND 6
278: #define MPI_BAND 7
279: #define MPI_LOR 8
280: #define MPI_BOR 9
281: #define MPI_LXOR 10
282: #define MPI_BXOR 11
283: #define MPI_MAXLOC 12
284: #define MPI_MINLOC 13
286: typedef void (MPI_User_function)(void*, void *, int *, MPI_Datatype *);
288: typedef int MPI_Errhandler;
289: #define MPI_ERRHANDLER_NULL 0
290: #define MPI_ERRORS_RETURN 0
291: #define MPI_ERRORS_ARE_FATAL 0
292: typedef void (MPI_Handler_function)(MPI_Comm *, int *, ...);
294: /*
295: Prototypes of some functions which are implemented in mpi.c
296: */
297: typedef int (MPI_Copy_function)(MPI_Comm,int,void *,void *,void *,int *);
298: typedef int (MPI_Delete_function)(MPI_Comm,int,void *,void *);
299: #define MPI_NULL_COPY_FN (MPI_Copy_function*)0
300: #define MPI_NULL_DELETE_FN (MPI_Delete_function*)0
302: /*
303: To enable linking PETSc+MPIUNI with any other package that might have its
304: own MPIUNI (equivalent implementation), we need to avoid using the 'MPI'
305: namespace for MPIUNI functions that go into the PETSc library.
307: For the C functions below (which get compiled into the PETSc library), we map
308: the 'MPI' functions into the 'Petsc_MPI' namespace.
310: With Fortran we use a similar mapping - thus requiring the use of the
311: C preprocessor with mpif.h.
312: */
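/*
    For example [illustration only], user code such as

      MPI_Comm_rank(MPI_COMM_WORLD,&rank);

    is rewritten by the preprocessor (via the #define's below) into a call to
    Petsc_MPI_Comm_rank(), so the symbol in PETSc's mpi.c cannot collide with an
    np=1 MPI stub that another sequential package may carry.
*/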
313: #define MPI_Abort Petsc_MPI_Abort
314: #define MPIUni_Abort Petsc_MPIUni_Abort
315: #define MPI_Attr_get Petsc_MPI_Attr_get
316: #define MPI_Keyval_free Petsc_MPI_Keyval_free
317: #define MPI_Attr_put Petsc_MPI_Attr_put
318: #define MPI_Attr_delete Petsc_MPI_Attr_delete
319: #define MPI_Keyval_create Petsc_MPI_Keyval_create
320: #define MPI_Comm_free Petsc_MPI_Comm_free
321: #define MPI_Comm_dup Petsc_MPI_Comm_dup
322: #define MPI_Comm_create Petsc_MPI_Comm_create
323: #define MPI_Init Petsc_MPI_Init
324: #define MPI_Finalize Petsc_MPI_Finalize
325: #define MPI_Initialized Petsc_MPI_Initialized
326: #define MPI_Finalized Petsc_MPI_Finalized
327: #define MPI_Comm_size Petsc_MPI_Comm_size
328: #define MPI_Comm_rank Petsc_MPI_Comm_rank
329: #define MPI_Wtime Petsc_MPI_Wtime
330: #define MPI_Type_get_envelope Petsc_MPI_Type_get_envelope
331: #define MPI_Type_get_contents Petsc_MPI_Type_get_contents
332: #define MPI_Add_error_class Petsc_MPI_Add_error_class
333: #define MPI_Add_error_code Petsc_MPI_Add_error_code
335: /* identical C bindings */
336: #define MPI_Comm_copy_attr_function MPI_Copy_function
337: #define MPI_Comm_delete_attr_function MPI_Delete_function
338: #define MPI_COMM_NULL_COPY_FN MPI_NULL_COPY_FN
339: #define MPI_COMM_NULL_DELETE_FN MPI_NULL_DELETE_FN
340: #define MPI_Comm_create_keyval Petsc_MPI_Keyval_create
341: #define MPI_Comm_free_keyval Petsc_MPI_Keyval_free
342: #define MPI_Comm_get_attr Petsc_MPI_Attr_get
343: #define MPI_Comm_set_attr Petsc_MPI_Attr_put
344: #define MPI_Comm_delete_attr Petsc_MPI_Attr_delete
346: MPIUni_PETSC_EXTERN int MPIUni_Abort(MPI_Comm,int);
347: MPIUni_PETSC_EXTERN int MPI_Abort(MPI_Comm,int);
348: MPIUni_PETSC_EXTERN int MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag);
349: MPIUni_PETSC_EXTERN int MPI_Keyval_free(int*);
350: MPIUni_PETSC_EXTERN int MPI_Attr_put(MPI_Comm,int,void *);
351: MPIUni_PETSC_EXTERN int MPI_Attr_delete(MPI_Comm,int);
352: MPIUni_PETSC_EXTERN int MPI_Keyval_create(MPI_Copy_function *,MPI_Delete_function *,int *,void *);
353: MPIUni_PETSC_EXTERN int MPI_Comm_free(MPI_Comm*);
354: MPIUni_PETSC_EXTERN int MPI_Comm_dup(MPI_Comm,MPI_Comm *);
355: MPIUni_PETSC_EXTERN int MPI_Comm_create(MPI_Comm,MPI_Group,MPI_Comm *);
356: MPIUni_PETSC_EXTERN int MPI_Init(int *, char ***);
357: MPIUni_PETSC_EXTERN int MPI_Finalize(void);
358: MPIUni_PETSC_EXTERN int MPI_Initialized(int*);
359: MPIUni_PETSC_EXTERN int MPI_Finalized(int*);
360: MPIUni_PETSC_EXTERN int MPI_Comm_size(MPI_Comm,int*);
361: MPIUni_PETSC_EXTERN int MPI_Comm_rank(MPI_Comm,int*);
362: MPIUni_PETSC_EXTERN double MPI_Wtime(void);
364: MPIUni_PETSC_EXTERN int MPI_Type_get_envelope(MPI_Datatype,int*,int*,int*,int*);
365: MPIUni_PETSC_EXTERN int MPI_Type_get_contents(MPI_Datatype,int,int,int,int*,MPI_Aint*,MPI_Datatype*);
366: MPIUni_PETSC_EXTERN int MPI_Add_error_class(int*);
367: MPIUni_PETSC_EXTERN int MPI_Add_error_code(int,int*);
369: /*
370: Routines we have replaced with macros that do nothing.
371: Some return error codes, others return success.
372: */
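/*
    Two patterns occur below [contrast shown for illustration only]: operations
    that are trivially correct on one process just evaluate their arguments and
    return MPI_SUCCESS, for example

      MPI_Barrier(comm)                 ->  (MPIUNI_ARG(comm), MPI_SUCCESS)

    while point-to-point operations that MPIUNI cannot honor evaluate their
    arguments and then call MPIUni_Abort(), for example

      MPI_Send(buf,count,datatype,...)  ->  (..., MPIUni_Abort(MPI_COMM_WORLD,0))

    so code that reaches them in sequential mode fails loudly rather than hanging.
*/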
374: typedef int MPI_Fint;
375: #define MPI_Comm_f2c(comm) (MPI_Comm)(comm)
376: #define MPI_Comm_c2f(comm) (MPI_Fint)(comm)
377: #define MPI_Type_f2c(type) (MPI_Datatype)(type)
378: #define MPI_Type_c2f(type) (MPI_Fint)(type)
379: #define MPI_Op_f2c(op) (MPI_Op)(op)
380: #define MPI_Op_c2f(op) (MPI_Fint)(op)
382: #define MPI_Send(buf,count,datatype,dest,tag,comm) \
383: (MPIUNI_ARG(buf),\
384: MPIUNI_ARG(count),\
385: MPIUNI_ARG(datatype),\
386: MPIUNI_ARG(dest),\
387: MPIUNI_ARG(tag),\
388: MPIUNI_ARG(comm),\
389: MPIUni_Abort(MPI_COMM_WORLD,0))
390: #define MPI_Recv(buf,count,datatype,source,tag,comm,status) \
391: (MPIUNI_ARG(buf),\
392: MPIUNI_ARG(count),\
393: MPIUNI_ARG(datatype),\
394: MPIUNI_ARG(source),\
395: MPIUNI_ARG(tag),\
396: MPIUNI_ARG(comm),\
397: MPIUNI_ARG(status),\
398: MPIUni_Abort(MPI_COMM_WORLD,0))
399: #define MPI_Get_count(status,datatype,count) \
400: (MPIUNI_ARG(status),\
401: MPIUNI_ARG(datatype),\
402: MPIUNI_ARG(count),\
403: MPIUni_Abort(MPI_COMM_WORLD,0))
404: #define MPI_Bsend(buf,count,datatype,dest,tag,comm) \
405: (MPIUNI_ARG(buf),\
406: MPIUNI_ARG(count),\
407: MPIUNI_ARG(datatype),\
408: MPIUNI_ARG(dest),\
409: MPIUNI_ARG(tag),\
410: MPIUNI_ARG(comm),\
411: MPIUni_Abort(MPI_COMM_WORLD,0))
412: #define MPI_Ssend(buf,count,datatype,dest,tag,comm) \
413: (MPIUNI_ARG(buf),\
414: MPIUNI_ARG(count),\
415: MPIUNI_ARG(datatype),\
416: MPIUNI_ARG(dest),\
417: MPIUNI_ARG(tag),\
418: MPIUNI_ARG(comm),\
419: MPIUni_Abort(MPI_COMM_WORLD,0))
420: #define MPI_Rsend(buf,count,datatype,dest,tag,comm) \
421: (MPIUNI_ARG(buf),\
422: MPIUNI_ARG(count),\
423: MPIUNI_ARG(datatype),\
424: MPIUNI_ARG(dest),\
425: MPIUNI_ARG(tag),\
426: MPIUNI_ARG(comm),\
427: MPIUni_Abort(MPI_COMM_WORLD,0))
428: #define MPI_Buffer_attach(buffer,size) \
429: (MPIUNI_ARG(buffer),\
430: MPIUNI_ARG(size),\
431: MPI_SUCCESS)
432: #define MPI_Buffer_detach(buffer,size)\
433: (MPIUNI_ARG(buffer),\
434: MPIUNI_ARG(size),\
435: MPI_SUCCESS)
436: #define MPI_Ibsend(buf,count,datatype,dest,tag,comm,request) \
437: (MPIUNI_ARG(buf),\
438: MPIUNI_ARG(count),\
439: MPIUNI_ARG(datatype),\
440: MPIUNI_ARG(dest),\
441: MPIUNI_ARG(tag),\
442: MPIUNI_ARG(comm),\
443: MPIUNI_ARG(request),\
444: MPIUni_Abort(MPI_COMM_WORLD,0))
445: #define MPI_Issend(buf,count,datatype,dest,tag,comm,request) \
446: (MPIUNI_ARG(buf),\
447: MPIUNI_ARG(count),\
448: MPIUNI_ARG(datatype),\
449: MPIUNI_ARG(dest),\
450: MPIUNI_ARG(tag),\
451: MPIUNI_ARG(comm),\
452: MPIUNI_ARG(request),\
453: MPIUni_Abort(MPI_COMM_WORLD,0))
454: #define MPI_Irsend(buf,count,datatype,dest,tag,comm,request) \
455: (MPIUNI_ARG(buf),\
456: MPIUNI_ARG(count),\
457: MPIUNI_ARG(datatype),\
458: MPIUNI_ARG(dest),\
459: MPIUNI_ARG(tag),\
460: MPIUNI_ARG(comm),\
461: MPIUNI_ARG(request),\
462: MPIUni_Abort(MPI_COMM_WORLD,0))
463: #define MPI_Irecv(buf,count,datatype,source,tag,comm,request) \
464: (MPIUNI_ARG(buf),\
465: MPIUNI_ARG(count),\
466: MPIUNI_ARG(datatype),\
467: MPIUNI_ARG(source),\
468: MPIUNI_ARG(tag),\
469: MPIUNI_ARG(comm),\
470: MPIUNI_ARG(request),\
471: MPIUni_Abort(MPI_COMM_WORLD,0))
472: #define MPI_Isend(buf,count,datatype,dest,tag,comm,request) \
473: (MPIUNI_ARG(buf),\
474: MPIUNI_ARG(count),\
475: MPIUNI_ARG(datatype),\
476: MPIUNI_ARG(dest),\
477: MPIUNI_ARG(tag),\
478: MPIUNI_ARG(comm),\
479: MPIUNI_ARG(request),\
480: MPIUni_Abort(MPI_COMM_WORLD,0))
481: #define MPI_Wait(request,status) \
482: (MPIUNI_ARG(request),\
483: MPIUNI_ARG(status),\
484: MPI_SUCCESS)
485: #define MPI_Test(request,flag,status) \
486: (MPIUNI_ARG(request),\
487: MPIUNI_ARG(status),\
488: *(flag) = 0,\
489: MPI_SUCCESS)
490: #define MPI_Request_free(request) \
491: (MPIUNI_ARG(request),\
492: MPI_SUCCESS)
493: #define MPI_Waitany(count,array_of_requests,index,status) \
494: (MPIUNI_ARG(count),\
495: MPIUNI_ARG(array_of_requests),\
496: MPIUNI_ARG(status),\
497: *(index) = 0,\
498: MPI_SUCCESS)
499: #define MPI_Testany(a,b,c,d,e) \
500: (MPIUNI_ARG(a),\
501: MPIUNI_ARG(b),\
502: MPIUNI_ARG(c),\
503: MPIUNI_ARG(d),\
504: MPIUNI_ARG(e),\
505: MPI_SUCCESS)
506: #define MPI_Waitall(count,array_of_requests,array_of_statuses) \
507: (MPIUNI_ARG(count),\
508: MPIUNI_ARG(array_of_requests),\
509: MPIUNI_ARG(array_of_statuses),\
510: MPI_SUCCESS)
511: #define MPI_Testall(count,array_of_requests,flag,array_of_statuses) \
512: (MPIUNI_ARG(count),\
513: MPIUNI_ARG(array_of_requests),\
514: MPIUNI_ARG(flag),\
515: MPIUNI_ARG(array_of_statuses),\
516: MPI_SUCCESS)
517: #define MPI_Waitsome(incount,array_of_requests,outcount,\
518: array_of_indices,array_of_statuses) \
519: (MPIUNI_ARG(incount),\
520: MPIUNI_ARG(array_of_requests),\
521: MPIUNI_ARG(outcount),\
522: MPIUNI_ARG(array_of_indices),\
523: MPIUNI_ARG(array_of_statuses),\
524: MPI_SUCCESS)
525: #define MPI_Comm_group(comm,group) \
526: (MPIUNI_ARG(comm),\
527: *group = 1,\
528: MPI_SUCCESS)
529: #define MPI_Group_incl(group,n,ranks,newgroup) \
530: (MPIUNI_ARG(group),\
531: MPIUNI_ARG(n),\
532: MPIUNI_ARG(ranks),\
533: MPIUNI_ARG(newgroup),\
534: MPI_SUCCESS)
535: #define MPI_Testsome(incount,array_of_requests,outcount,\
536: array_of_indices,array_of_statuses) \
537: (MPIUNI_ARG(incount),\
538: MPIUNI_ARG(array_of_requests),\
539: MPIUNI_ARG(outcount),\
540: MPIUNI_ARG(array_of_indices),\
541: MPIUNI_ARG(array_of_statuses),\
542: MPI_SUCCESS)
543: #define MPI_Iprobe(source,tag,comm,flag,status) \
544: (MPIUNI_ARG(source),\
545: MPIUNI_ARG(tag),\
546: MPIUNI_ARG(comm),\
547: *(flag)=0,\
548: MPIUNI_ARG(status),\
549: MPI_SUCCESS)
550: #define MPI_Probe(source,tag,comm,status) \
551: (MPIUNI_ARG(source),\
552: MPIUNI_ARG(tag),\
553: MPIUNI_ARG(comm),\
554: MPIUNI_ARG(status),\
555: MPI_SUCCESS)
556: #define MPI_Cancel(request) \
557: (MPIUNI_ARG(request),\
558: MPI_SUCCESS)
559: #define MPI_Test_cancelled(status,flag) \
560: (MPIUNI_ARG(status),\
561: *(flag)=0,\
562: MPI_SUCCESS)
563: #define MPI_Send_init(buf,count,datatype,dest,tag,comm,request) \
564: (MPIUNI_ARG(buf),\
565: MPIUNI_ARG(count),\
566: MPIUNI_ARG(datatype),\
567: MPIUNI_ARG(dest),\
568: MPIUNI_ARG(tag),\
569: MPIUNI_ARG(comm),\
570: MPIUNI_ARG(request),\
571: MPI_SUCCESS)
572: #define MPI_Bsend_init(buf,count,datatype,dest,tag,comm,request) \
573: (MPIUNI_ARG(buf),\
574: MPIUNI_ARG(count),\
575: MPIUNI_ARG(datatype),\
576: MPIUNI_ARG(dest),\
577: MPIUNI_ARG(tag),\
578: MPIUNI_ARG(comm),\
579: MPIUNI_ARG(request),\
580: MPI_SUCCESS)
581: #define MPI_Ssend_init(buf,count,datatype,dest,tag,comm,request) \
582: (MPIUNI_ARG(buf),\
583: MPIUNI_ARG(count),\
584: MPIUNI_ARG(datatype),\
585: MPIUNI_ARG(dest),\
586: MPIUNI_ARG(tag),\
587: MPIUNI_ARG(comm),\
588: MPIUNI_ARG(request),\
589: MPI_SUCCESS)
590: #define MPI_Bsend_init(buf,count,datatype,dest,tag,comm,request) \
591: (MPIUNI_ARG(buf),\
592: MPIUNI_ARG(count),\
593: MPIUNI_ARG(datatype),\
594: MPIUNI_ARG(dest),\
595: MPIUNI_ARG(tag),\
596: MPIUNI_ARG(comm),\
597: MPIUNI_ARG(request),\
598: MPI_SUCCESS)
599: #define MPI_Rsend_init(buf,count,datatype,dest,tag,comm,request) \
600: (MPIUNI_ARG(buf),\
601: MPIUNI_ARG(count),\
602: MPIUNI_ARG(datatype),\
603: MPIUNI_ARG(dest),\
604: MPIUNI_ARG(tag),\
605: MPIUNI_ARG(comm),\
606: MPIUNI_ARG(request),\
607: MPI_SUCCESS)
608: #define MPI_Recv_init(buf,count,datatype,source,tag,comm,request) \
609: (MPIUNI_ARG(buf),\
610: MPIUNI_ARG(count),\
611: MPIUNI_ARG(datatype),\
612: MPIUNI_ARG(source),\
613: MPIUNI_ARG(tag),\
614: MPIUNI_ARG(comm),\
615: MPIUNI_ARG(request),\
616: MPI_SUCCESS)
617: #define MPI_Start(request) \
618: (MPIUNI_ARG(request),\
619: MPI_SUCCESS)
620: #define MPI_Startall(count,array_of_requests) \
621: (MPIUNI_ARG(count),\
622: MPIUNI_ARG(array_of_requests),\
623: MPI_SUCCESS)
624: #define MPI_Sendrecv(sendbuf,sendcount,sendtype,\
625: dest,sendtag,recvbuf,recvcount,\
626: recvtype,source,recvtag,\
627: comm,status) \
628: (MPIUNI_ARG(dest),\
629: MPIUNI_ARG(sendtag),\
630: MPIUNI_ARG(recvcount),\
631: MPIUNI_ARG(recvtype),\
632: MPIUNI_ARG(source),\
633: MPIUNI_ARG(recvtag),\
634: MPIUNI_ARG(comm),\
635: MPIUNI_ARG(status),\
636: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
637: #define MPI_Sendrecv_replace(buf,count,datatype,dest,sendtag,\
638: source,recvtag,comm,status) \
639: (MPIUNI_ARG(buf),\
640: MPIUNI_ARG(count),\
641: MPIUNI_ARG(datatype),\
642: MPIUNI_ARG(dest),\
643: MPIUNI_ARG(sendtag),\
644: MPIUNI_ARG(source),\
645: MPIUNI_ARG(recvtag),\
646: MPIUNI_ARG(comm),\
647: MPIUNI_ARG(status),\
648: MPI_SUCCESS)
650: #define MPI_COMBINER_NAMED 0
651: #define MPI_COMBINER_DUP 1
652: #define MPI_COMBINER_CONTIGUOUS 2
653: /* 32-bit packing scheme: [combiner:4 | type-index:8 | count:12 | base-bytes:8] */
654: #define MPI_Type_dup(oldtype,newtype) \
655: (*(newtype) = oldtype, MPI_SUCCESS)
656: #define MPI_Type_contiguous(count,oldtype,newtype) \
657: (*(newtype) = (MPI_COMBINER_CONTIGUOUS<<28)|((oldtype)&0x0ff00000)|(((oldtype)>>8&0xfff)*(count))<<8|((oldtype)&0xff), MPI_SUCCESS)
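/*
    For example [illustrative; assumes a 4-byte int]:

      MPI_Datatype t;
      int          sz;
      MPI_Type_contiguous(5,MPI_INT,&t);
      MPI_Type_size(t,&sz);                 sz == 5*sizeof(int) == 20

    the count field (bits 8-19) of t becomes 5, the combiner bits record
    MPI_COMBINER_CONTIGUOUS, the base size stays sizeof(int), and MPI_sizeof()
    (used by MPI_Type_size() below) multiplies count and base size back together.
*/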
658: #define MPI_Type_vector(count,blocklength,stride,oldtype,newtype) \
659: (MPIUNI_ARG(count),\
660: MPIUNI_ARG(blocklength),\
661: MPIUNI_ARG(stride),\
662: MPIUNI_ARG(oldtype),\
663: MPIUNI_ARG(newtype),\
664: MPIUni_Abort(MPI_COMM_WORLD,0))
665: #define MPI_Type_hvector(count,blocklength,stride,oldtype,newtype) \
666: (MPIUNI_ARG(count),\
667: MPIUNI_ARG(blocklength),\
668: MPIUNI_ARG(stride),\
669: MPIUNI_ARG(oldtype),\
670: MPIUNI_ARG(newtype),\
671: MPIUni_Abort(MPI_COMM_WORLD,0))
672: #define MPI_Type_indexed(count,array_of_blocklengths,array_of_displacements,oldtype,newtype) \
673: (MPIUNI_ARG(count),\
674: MPIUNI_ARG(array_of_blocklengths),\
675: MPIUNI_ARG(array_of_displacements),\
676: MPIUNI_ARG(oldtype),\
677: MPIUNI_ARG(newtype),\
678: MPIUni_Abort(MPI_COMM_WORLD,0))
679: #define MPI_Type_hindexed(count,array_of_blocklengths,array_of_displacements,oldtype,newtype) \
680: (MPIUNI_ARG(count),\
681: MPIUNI_ARG(array_of_blocklengths),\
682: MPIUNI_ARG(array_of_displacements),\
683: MPIUNI_ARG(oldtype),\
684: MPIUNI_ARG(newtype),\
685: MPIUni_Abort(MPI_COMM_WORLD,0))
686: #define MPI_Type_struct(count,array_of_blocklengths,array_of_displacements,array_of_types,newtype) \
687: (MPIUNI_ARG(count),\
688: MPIUNI_ARG(array_of_blocklengths),\
689: MPIUNI_ARG(array_of_displacements),\
690: MPIUNI_ARG(array_of_types),\
691: MPIUNI_ARG(newtype),\
692: MPIUni_Abort(MPI_COMM_WORLD,0))
693: #define MPI_Address(location,address) \
694: (*(address) = (MPI_Aint)((char *)(location)), MPI_SUCCESS)
695: #define MPI_Type_size(datatype,size) (*(size) = MPI_sizeof((datatype)), MPI_SUCCESS)
696: #define MPI_Type_lb(datatype,lb) (MPIUNI_ARG(datatype), *(lb) = 0, MPI_SUCCESS)
697: #define MPI_Type_ub(datatype,ub) (*(ub) = MPI_sizeof((datatype)), MPI_SUCCESS)
698: #define MPI_Type_extent(datatype,extent) \
699: (*(extent) = MPI_sizeof((datatype)), MPI_SUCCESS)
700: #define MPI_Type_get_extent(datatype,lb,extent) \
701: (*(lb) = 0, *(extent) = MPI_sizeof((datatype)), MPI_SUCCESS)
702: #define MPI_Type_commit(datatype) (MPIUNI_ARG(datatype), MPI_SUCCESS)
703: #define MPI_Type_free(datatype) (*(datatype) = MPI_DATATYPE_NULL, MPI_SUCCESS)
704: #define MPI_Get_elements(status,datatype,count) \
705: (MPIUNI_ARG(status),\
706: MPIUNI_ARG(datatype),\
707: MPIUNI_ARG(count),\
708: MPIUni_Abort(MPI_COMM_WORLD,0))
709: #define MPI_Pack(inbuf,incount,datatype,outbuf,outsize,position,comm) \
710: (MPIUNI_ARG(inbuf),\
711: MPIUNI_ARG(incount),\
712: MPIUNI_ARG(datatype),\
713: MPIUNI_ARG(outbuf),\
714: MPIUNI_ARG(outsize),\
715: MPIUNI_ARG(position),\
716: MPIUNI_ARG(comm),\
717: MPIUni_Abort(MPI_COMM_WORLD,0))
718: #define MPI_Unpack(inbuf,insize,position,outbuf,outcount,datatype,comm) \
719: (MPIUNI_ARG(inbuf),\
720: MPIUNI_ARG(insize),\
721: MPIUNI_ARG(position),\
722: MPIUNI_ARG(outbuf),\
723: MPIUNI_ARG(outcount),\
724: MPIUNI_ARG(datatype),\
725: MPIUNI_ARG(comm),\
726: MPIUni_Abort(MPI_COMM_WORLD,0))
727: #define MPI_Pack_size(incount,datatype,comm,size) \
728: (MPIUNI_ARG(incount),\
729: MPIUNI_ARG(datatype),\
730: MPIUNI_ARG(comm),\
731: MPIUNI_ARG(size),\
732: MPIUni_Abort(MPI_COMM_WORLD,0))
733: #define MPI_Barrier(comm) \
734: (MPIUNI_ARG(comm),\
735: MPI_SUCCESS)
736: #define MPI_Bcast(buffer,count,datatype,root,comm) \
737: (MPIUNI_ARG(buffer),\
738: MPIUNI_ARG(count),\
739: MPIUNI_ARG(datatype),\
740: MPIUNI_ARG(root),\
741: MPIUNI_ARG(comm),\
742: MPI_SUCCESS)
743: #define MPI_Gather(sendbuf,sendcount,sendtype,\
744: recvbuf,recvcount, recvtype,\
745: root,comm) \
746: (MPIUNI_ARG(recvcount),\
747: MPIUNI_ARG(root),\
748: MPIUNI_ARG(recvtype),\
749: MPIUNI_ARG(comm),\
750: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
751: #define MPI_Gatherv(sendbuf,sendcount,sendtype,\
752: recvbuf,recvcounts,displs,\
753: recvtype,root,comm) \
754: (MPIUNI_ARG(recvcounts),\
755: MPIUNI_ARG(displs),\
756: MPIUNI_ARG(recvtype),\
757: MPIUNI_ARG(root),\
758: MPIUNI_ARG(comm),\
759: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
760: #define MPI_Scatter(sendbuf,sendcount,sendtype,\
761: recvbuf,recvcount,recvtype,\
762: root,comm) \
763: (MPIUNI_ARG(sendcount),\
764: MPIUNI_ARG(sendtype),\
765: MPIUNI_ARG(recvbuf),\
766: MPIUNI_ARG(recvtype),\
767: MPIUNI_ARG(root),\
768: MPIUNI_ARG(comm),\
769: MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*MPI_sizeof(recvtype)))
770: #define MPI_Scatterv(sendbuf,sendcounts,displs,\
771: sendtype,recvbuf,recvcount,\
772: recvtype,root,comm) \
773: (MPIUNI_ARG(displs),\
774: MPIUNI_ARG(sendtype),\
775: MPIUNI_ARG(sendcounts),\
776: MPIUNI_ARG(root),\
777: MPIUNI_ARG(comm),\
778: MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*MPI_sizeof(recvtype)))
779: #define MPI_Allgather(sendbuf,sendcount,sendtype,\
780: recvbuf,recvcount,recvtype,comm) \
781: (MPIUNI_ARG(recvcount),\
782: MPIUNI_ARG(recvtype),\
783: MPIUNI_ARG(comm),\
784: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
785: #define MPI_Allgatherv(sendbuf,sendcount,sendtype,\
786: recvbuf,recvcounts,displs,recvtype,comm) \
787: (MPIUNI_ARG(recvcounts),\
788: MPIUNI_ARG(displs),\
789: MPIUNI_ARG(recvtype),\
790: MPIUNI_ARG(comm),\
791: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
792: #define MPI_Alltoall(sendbuf,sendcount,sendtype,\
793: recvbuf,recvcount,recvtype,comm) \
794: (MPIUNI_ARG(recvcount),\
795: MPIUNI_ARG(recvtype),\
796: MPIUNI_ARG(comm),\
797: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
798: #define MPI_Alltoallv(sendbuf,sendcounts,sdispls,sendtype,\
799: recvbuf,recvcounts,rdispls,recvtype,comm) \
800: (MPIUNI_ARG(sendbuf),\
801: MPIUNI_ARG(sendcounts),\
802: MPIUNI_ARG(sdispls),\
803: MPIUNI_ARG(sendtype),\
804: MPIUNI_ARG(recvbuf),\
805: MPIUNI_ARG(recvcounts),\
806: MPIUNI_ARG(rdispls),\
807: MPIUNI_ARG(recvtype),\
808: MPIUNI_ARG(comm),\
809: MPIUni_Abort(MPI_COMM_WORLD,0))
810: #define MPI_Alltoallw(sendbuf,sendcounts,sdispls,sendtypes,\
811: recvbuf,recvcounts,rdispls,recvtypes,comm) \
812: (MPIUNI_ARG(sendbuf),\
813: MPIUNI_ARG(sendcounts),\
814: MPIUNI_ARG(sdispls),\
815: MPIUNI_ARG(sendtypes),\
816: MPIUNI_ARG(recvbuf),\
817: MPIUNI_ARG(recvcounts),\
818: MPIUNI_ARG(rdispls),\
819: MPIUNI_ARG(recvtypes),\
820: MPIUNI_ARG(comm),\
821: MPIUni_Abort(MPI_COMM_WORLD,0))
822: #define MPI_Reduce(sendbuf,recvbuf,count,datatype,op,root,comm) \
823: (MPIUNI_ARG(op),\
824: MPIUNI_ARG(root),\
825: MPIUNI_ARG(comm),\
826: MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
827: #define MPI_Allreduce(sendbuf, recvbuf,count,datatype,op,comm) \
828: (MPIUNI_ARG(op),\
829: MPIUNI_ARG(comm),\
830: MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
831: #define MPI_Scan(sendbuf, recvbuf,count,datatype,op,comm) \
832: (MPIUNI_ARG(op),\
833: MPIUNI_ARG(comm),\
834: MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
835: #define MPI_Exscan(sendbuf,recvbuf,count,datatype,op,comm) \
836: (MPIUNI_ARG(sendbuf),\
837: MPIUNI_ARG(recvbuf),\
838: MPIUNI_ARG(count),\
839: MPIUNI_ARG(datatype),\
840: MPIUNI_ARG(op),\
841: MPIUNI_ARG(comm),\
842: MPI_SUCCESS)
843: #define MPI_Reduce_scatter(sendbuf,recvbuf,recvcounts,datatype,op,comm) \
844: (MPIUNI_ARG(sendbuf),\
845: MPIUNI_ARG(recvbuf),\
846: MPIUNI_ARG(recvcounts),\
847: MPIUNI_ARG(datatype),\
848: MPIUNI_ARG(op),\
849: MPIUNI_ARG(comm),\
850: MPIUni_Abort(MPI_COMM_WORLD,0))
852: #define MPI_Op_create(function,commute,op) \
853: (MPIUNI_ARG(function),\
854: MPIUNI_ARG(commute),\
855: MPIUNI_ARG(op),\
856: MPI_SUCCESS)
857: #define MPI_Op_free(op) \
858: (*(op) = MPI_OP_NULL, MPI_SUCCESS)
860: #define MPI_Group_size(group,size) \
861: (MPIUNI_ARG(group),\
862: *(size)=1,\
863: MPI_SUCCESS)
864: #define MPI_Group_rank(group,rank) \
865: (MPIUNI_ARG(group),\
866: *(rank)=0,\
867: MPI_SUCCESS)
868: #define MPI_Group_translate_ranks(group1,n,ranks1,group2,ranks2) \
869: (MPIUNI_ARG(group1),\
870: MPIUNI_ARG(group2),\
871: MPIUNI_Memcpy((ranks2),(ranks1),(n)*sizeof(int)))
872: #define MPI_Group_compare(group1,group2,result) \
873: (MPIUNI_ARG(group1),\
874: MPIUNI_ARG(group2),\
875: *(result)=1,\
876: MPI_SUCCESS)
877: #define MPI_Group_union(group1,group2,newgroup) MPI_SUCCESS
878: #define MPI_Group_intersection(group1,group2,newgroup) MPI_SUCCESS
879: #define MPI_Group_difference(group1,group2,newgroup) MPI_SUCCESS
880: #define MPI_Group_excl(group,n,ranks,newgroup) MPI_SUCCESS
881: #define MPI_Group_range_incl(group,n,ranges,newgroup) MPI_SUCCESS
882: #define MPI_Group_range_excl(group,n,ranges,newgroup) MPI_SUCCESS
883: #define MPI_Group_free(group) \
884: (*(group) = MPI_GROUP_NULL, MPI_SUCCESS)
886: #define MPI_Comm_compare(comm1,comm2,result) \
887: (MPIUNI_ARG(comm1),\
888: MPIUNI_ARG(comm2),\
889: *(result)=MPI_IDENT,\
890: MPI_SUCCESS)
891: #define MPI_Comm_split(comm,color,key,newcomm) \
892: (MPIUNI_ARG(color),\
893: MPIUNI_ARG(key),\
894: MPI_Comm_dup(comm,newcomm))
895: #define MPI_Comm_split_type(comm,color,key,info,newcomm) \
896: (MPIUNI_ARG(color),\
897: MPIUNI_ARG(key),\
898: MPIUNI_ARG(info),\
899: MPI_Comm_dup(comm,newcomm))
900: #define MPI_Comm_test_inter(comm,flag) (*(flag)=1, MPI_SUCCESS)
901: #define MPI_Comm_remote_size(comm,size) (*(size)=1 ,MPI_SUCCESS)
902: #define MPI_Comm_remote_group(comm,group) MPI_SUCCESS
903: #define MPI_Intercomm_create(local_comm,local_leader,peer_comm,\
904: remote_leader,tag,newintercomm) MPI_SUCCESS
905: #define MPI_Intercomm_merge(intercomm,high,newintracomm) MPI_SUCCESS
906: #define MPI_Topo_test(comm,flag) MPI_SUCCESS
907: #define MPI_Cart_create(comm_old,ndims,dims,periods,\
908: reorder,comm_cart) MPIUni_Abort(MPI_COMM_WORLD,0)
909: #define MPI_Dims_create(nnodes,ndims,dims) MPIUni_Abort(MPI_COMM_WORLD,0)
910: #define MPI_Graph_create(comm,a,b,c,d,e) MPIUni_Abort(MPI_COMM_WORLD,0)
911: #define MPI_Graphdims_get(comm,nnodes,nedges) MPIUni_Abort(MPI_COMM_WORLD,0)
912: #define MPI_Graph_get(comm,a,b,c,d) MPIUni_Abort(MPI_COMM_WORLD,0)
913: #define MPI_Cartdim_get(comm,ndims) MPIUni_Abort(MPI_COMM_WORLD,0)
914: #define MPI_Cart_get(comm,maxdims,dims,periods,coords) \
915: MPIUni_Abort(MPI_COMM_WORLD,0)
916: #define MPI_Cart_rank(comm,coords,rank) MPIUni_Abort(MPI_COMM_WORLD,0)
917: #define MPI_Cart_coords(comm,rank,maxdims,coords) \
918: MPIUni_Abort(MPI_COMM_WORLD,0)
919: #define MPI_Graph_neighbors_count(comm,rank,nneighbors) \
920: MPIUni_Abort(MPI_COMM_WORLD,0)
921: #define MPI_Graph_neighbors(comm,rank,maxneighbors,neighbors) \
922: MPIUni_Abort(MPI_COMM_WORLD,0)
923: #define MPI_Cart_shift(comm,direction,disp,rank_source,rank_dest) \
924: MPIUni_Abort(MPI_COMM_WORLD,0)
925: #define MPI_Cart_sub(comm,remain_dims,newcomm) MPIUni_Abort(MPI_COMM_WORLD,0)
926: #define MPI_Cart_map(comm,ndims,dims,periods,newrank) MPIUni_Abort(MPI_COMM_WORLD,0)
927: #define MPI_Graph_map(comm,a,b,c,d) MPIUni_Abort(MPI_COMM_WORLD,0)
929: #define MPI_Get_processor_name(name,result_len) \
930: (*(result_len) = 9,MPIUNI_Memcpy(name,"localhost",10*sizeof(char)))
931: #define MPI_Errhandler_create(function,errhandler) \
932: (MPIUNI_ARG(function),\
933: *(errhandler) = MPI_ERRORS_RETURN,\
934: MPI_SUCCESS)
935: #define MPI_Errhandler_set(comm,errhandler) \
936: (MPIUNI_ARG(comm),\
937: MPIUNI_ARG(errhandler),\
938: MPI_SUCCESS)
939: #define MPI_Errhandler_get(comm,errhandler) \
940: (MPIUNI_ARG(comm),\
941: (*errhandler) = MPI_ERRORS_RETURN,\
942: MPI_SUCCESS)
943: #define MPI_Errhandler_free(errhandler) \
944: (*(errhandler) = MPI_ERRHANDLER_NULL,\
945: MPI_SUCCESS)
946: #define MPI_Error_string(errorcode,string,result_len) \
947: (MPIUNI_ARG(errorcode),\
948: *(result_len) = 9,\
949: MPIUNI_Memcpy(string,"MPI error",10*sizeof(char)))
950: #define MPI_Error_class(errorcode,errorclass) \
951: (*(errorclass) = errorcode, MPI_SUCCESS)
952: #define MPI_Wtick() 1.0
953: #define MPI_Pcontrol(level) MPI_SUCCESS
955: /* MPI-IO additions */
957: typedef int MPI_File;
958: #define MPI_FILE_NULL 0
960: typedef int MPI_Offset;
962: #define MPI_MODE_RDONLY 0
963: #define MPI_MODE_WRONLY 0
964: #define MPI_MODE_CREATE 0
966: #define MPI_File_open(comm,filename,amode,info,mpi_fh) \
967: (MPIUNI_ARG(comm),\
968: MPIUNI_ARG(filename),\
969: MPIUNI_ARG(amode),\
970: MPIUNI_ARG(info),\
971: MPIUNI_ARG(mpi_fh),\
972: MPIUni_Abort(MPI_COMM_WORLD,0))
974: #define MPI_File_close(mpi_fh) \
975: (MPIUNI_ARG(mpi_fh),\
976: MPIUni_Abort(MPI_COMM_WORLD,0))
978: #define MPI_File_set_view(mpi_fh,disp,etype,filetype,datarep,info) \
979: (MPIUNI_ARG(mpi_fh),\
980: MPIUNI_ARG(disp),\
981: MPIUNI_ARG(etype),\
982: MPIUNI_ARG(filetype),\
983: MPIUNI_ARG(datarep),\
984: MPIUNI_ARG(info),\
985: MPIUni_Abort(MPI_COMM_WORLD,0))
987: #define MPI_File_write_all(mpi_fh,buf,count,datatype,status) \
988: (MPIUNI_ARG(mpi_fh),\
989: MPIUNI_ARG(buf),\
990: MPIUNI_ARG(count),\
991: MPIUNI_ARG(datatype),\
992: MPIUNI_ARG(status),\
993: MPIUni_Abort(MPI_COMM_WORLD,0))
995: #define MPI_File_read_all(mpi_fh,buf,count,datatype,status) \
996: (MPIUNI_ARG(mpi_fh),\
997: MPIUNI_ARG(buf),\
998: MPIUNI_ARG(count),\
999: MPIUNI_ARG(datatype),\
1000: MPIUNI_ARG(status),\
1001: MPIUni_Abort(MPI_COMM_WORLD,0))
1003: /* called from PetscInitialize() - so return success */
1004: #define MPI_Register_datarep(name,read_conv_fn,write_conv_fn,extent_fn,state) \
1005: (MPIUNI_ARG(name),\
1006: MPIUNI_ARG(read_conv_fn),\
1007: MPIUNI_ARG(write_conv_fn),\
1008: MPIUNI_ARG(extent_fn),\
1009: MPIUNI_ARG(state),\
1010: MPI_SUCCESS)
1012: #define MPI_Type_create_subarray(ndims,array_of_sizes,array_of_subsizes,array_of_starts,order,oldtype,newtype) \
1013: (MPIUNI_ARG(ndims),\
1014: MPIUNI_ARG(array_of_sizes),\
1015: MPIUNI_ARG(array_of_subsizes),\
1016: MPIUNI_ARG(array_of_starts),\
1017: MPIUNI_ARG(order),\
1018: MPIUNI_ARG(oldtype),\
1019: MPIUNI_ARG(newtype),\
1020: MPIUni_Abort(MPI_COMM_WORLD,0))
1022: #define MPI_Type_create_resized(oldtype,lb,extent,newtype) \
1023: (MPIUNI_ARG(oldtype),\
1024: MPIUNI_ARG(lb),\
1025: MPIUNI_ARG(extent),\
1026: MPIUNI_ARG(newtype),\
1027: MPIUni_Abort(MPI_COMM_WORLD,0))
1029: #if defined(__cplusplus)
1030: }
1031: #endif
1032: #endif