GEOSX
geosx::MpiWrapper Struct Reference

#include <MpiWrapper.hpp>

Public Types

enum  Reduction { Reduction::Max, Reduction::Min, Reduction::Sum, Reduction::Prod }
 

Public Member Functions

template<>
MPI_Datatype getMpiType ()
 	(explicit specializations of getMpiType() for each supported built-in type; the generated page lists twelve identical entries, shown once here)
 
template<typename T_SEND , typename T_RECV >
int allgather (T_SEND const *const sendbuf, int sendcount, T_RECV *const recvbuf, int recvcount, MPI_Comm MPI_PARAM(comm))
 
template<typename T >
void allGather (T const myValue, array1d< T > &allValues, MPI_Comm MPI_PARAM(comm))
 
template<typename T >
int allGather (arrayView1d< T const > const &sendValues, array1d< T > &allValues, MPI_Comm MPI_PARAM(comm))
 
template<typename T >
int allReduce (T const *const sendbuf, T *const recvbuf, int const count, MPI_Op MPI_PARAM(op), MPI_Comm MPI_PARAM(comm))
 
template<typename T >
int scan (T const *const sendbuf, T *const recvbuf, int count, MPI_Op MPI_PARAM(op), MPI_Comm MPI_PARAM(comm))
 
template<typename T >
int exscan (T const *const MPI_PARAM(sendbuf), T *const recvbuf, int count, MPI_Op MPI_PARAM(op), MPI_Comm MPI_PARAM(comm))
 
template<typename T >
int bcast (T *const MPI_PARAM(buffer), int MPI_PARAM(count), int MPI_PARAM(root), MPI_Comm MPI_PARAM(comm))
 
template<typename T >
void broadcast (T &MPI_PARAM(value), int MPI_PARAM(srcRank), MPI_Comm MPI_PARAM(comm))
 
template<>
void broadcast (string &MPI_PARAM(value), int MPI_PARAM(srcRank), MPI_Comm MPI_PARAM(comm))
 
template<typename TS , typename TR >
int gather (TS const *const sendbuf, int sendcount, TR *const recvbuf, int recvcount, int MPI_PARAM(root), MPI_Comm MPI_PARAM(comm))
 
template<typename TS , typename TR >
int gatherv (TS const *const sendbuf, int sendcount, TR *const recvbuf, const int *recvcounts, const int *MPI_PARAM(displs), int MPI_PARAM(root), MPI_Comm MPI_PARAM(comm))
 
template<typename T >
int iRecv (T *const buf, int count, int MPI_PARAM(source), int tag, MPI_Comm MPI_PARAM(comm), MPI_Request *MPI_PARAM(request))
 
template<typename T >
int iSend (T const *const buf, int count, int MPI_PARAM(dest), int tag, MPI_Comm MPI_PARAM(comm), MPI_Request *MPI_PARAM(request))
 
template<typename T >
T reduce (T const &value, Reduction const MPI_PARAM(op), MPI_Comm comm)
 

Static Public Member Functions

static std::map< int, std::pair< int, void *> > & getTagToPointersMap ()
 
template<typename T_SEND , typename T_RECV >
static int allgather (T_SEND const *sendbuf, int sendcount, T_RECV *recvbuf, int recvcount, MPI_Comm comm)
 Strongly typed wrapper around MPI_Allgather. More...
 
template<typename T >
static void allGather (T const myValue, array1d< T > &allValues, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience function for MPI_Allgather. More...
 
template<typename T >
static int allGather (arrayView1d< T const > const &sendbuf, array1d< T > &recvbuf, MPI_Comm comm=MPI_COMM_GEOSX)
 
template<typename T >
static int allReduce (T const *sendbuf, T *recvbuf, int count, MPI_Op op, MPI_Comm comm)
 Strongly typed wrapper around MPI_Allreduce. More...
 
template<typename T >
static int scan (T const *sendbuf, T *recvbuf, int count, MPI_Op op, MPI_Comm comm)
 
template<typename T >
static int exscan (T const *sendbuf, T *recvbuf, int count, MPI_Op op, MPI_Comm comm)
 
template<typename T >
static int bcast (T *buffer, int count, int root, MPI_Comm comm)
 Strongly typed wrapper around MPI_Bcast. More...
 
template<typename T >
static void broadcast (T &value, int srcRank=0, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience function for MPI_Broadcast. More...
 
template<typename TS , typename TR >
static int gather (TS const *const sendbuf, int sendcount, TR *const recvbuf, int recvcount, int root, MPI_Comm comm)
 Strongly typed wrapper around MPI_Gather(). More...
 
template<typename TS , typename TR >
static int gatherv (TS const *const sendbuf, int sendcount, TR *const recvbuf, const int *recvcounts, const int *displs, int root, MPI_Comm comm)
 Strongly typed wrapper around MPI_Gatherv. More...
 
template<typename T >
static MPI_Datatype getMpiType ()
 Returns an MPI_Datatype from a c type. More...
 
static std::size_t getSizeofMpiType (MPI_Datatype const type)
 
static MPI_Op getMpiOp (Reduction const op)
 Returns an MPI_Op associated with our strongly typed Reduction enum. More...
 
template<typename T >
static int recv (array1d< T > &buf, int MPI_PARAM(source), int tag, MPI_Comm MPI_PARAM(comm), MPI_Status *MPI_PARAM(request))
 
template<typename T >
static int iSend (arrayView1d< T const > const &buf, int MPI_PARAM(dest), int tag, MPI_Comm MPI_PARAM(comm), MPI_Request *MPI_PARAM(request))
 
template<typename T >
static int iRecv (T *const buf, int count, int source, int tag, MPI_Comm comm, MPI_Request *request)
 Strongly typed wrapper around MPI_Irecv() More...
 
template<typename T >
static int iSend (T const *const buf, int count, int dest, int tag, MPI_Comm comm, MPI_Request *request)
 Strongly typed wrapper around MPI_Isend() More...
 
template<typename T >
static T min (T const &value, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience function for a MPI_Reduce using a MPI_MIN operation. More...
 
template<typename T >
static T max (T const &value, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience function for a MPI_Reduce using a MPI_MAX operation. More...
 
template<typename U , typename T >
static U prefixSum (T const value, MPI_Comm comm=MPI_COMM_GEOSX)
 Compute exclusive prefix sum and full sum. More...
 
template<typename T >
static T reduce (T const &value, Reduction const op, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience function for the MPI_Reduce function. More...
 
template<typename T >
static T sum (T const &value, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience function for a MPI_Reduce using a MPI_SUM operation. More...
 
FUNCTION GROUP for the direct wrappers around native MPI functions
Parameters
[in] sendbuf  Pointer to the memory to read the sent data from.
[out] recvbuf  Pointer to the memory to write the received data in.
[in] count  The number of data entries that are being communicated.
[in] datatype  The MPI_Datatype that is being communicated.
[in] op  The collective MPI_Op to apply for the function.
[in] comm  The MPI_Comm communicator that the function will act on.

Please see the standard MPI documentation for a detailed description of the parameters of each wrapped function.

static void barrier (MPI_Comm const &MPI_PARAM(comm)=MPI_COMM_GEOSX)
 
static int cartCoords (MPI_Comm comm, int rank, int maxdims, int coords[])
 
static int cartCreate (MPI_Comm comm_old, int ndims, const int dims[], const int periods[], int reorder, MPI_Comm *comm_cart)
 
static int cartRank (MPI_Comm comm, const int coords[])
 
static void commFree (MPI_Comm &comm)
 
static int commRank (MPI_Comm const &MPI_PARAM(comm)=MPI_COMM_GEOSX)
 
static int commSize (MPI_Comm const &MPI_PARAM(comm)=MPI_COMM_GEOSX)
 
static bool commCompare (MPI_Comm const &comm1, MPI_Comm const &comm2)
 
static bool initialized ()
 
static int init (int *argc, char ***argv)
 
static void finalize ()
 
static MPI_Comm commDup (MPI_Comm const comm)
 
static MPI_Comm commSplit (MPI_Comm const comm, int color, int key)
 
static int test (MPI_Request *request, int *flag, MPI_Status *status)
 
static int testAny (int count, MPI_Request array_of_requests[], int *idx, int *flags, MPI_Status array_of_statuses[])
 
static int testSome (int count, MPI_Request array_of_requests[], int *outcount, int array_of_indices[], MPI_Status array_of_statuses[])
 
static int testAll (int count, MPI_Request array_of_requests[], int *flags, MPI_Status array_of_statuses[])
 
static int check (MPI_Request *request, int *flag, MPI_Status *status)
 
static int checkAny (int count, MPI_Request array_of_requests[], int *idx, int *flag, MPI_Status array_of_statuses[])
 
static int checkAll (int count, MPI_Request array_of_requests[], int *flag, MPI_Status array_of_statuses[])
 
static int wait (MPI_Request *request, MPI_Status *status)
 
static int waitAny (int count, MPI_Request array_of_requests[], int *indx, MPI_Status array_of_statuses[])
 
static int waitSome (int count, MPI_Request array_of_requests[], int *outcount, int array_of_indices[], MPI_Status array_of_statuses[])
 
static int waitAll (int count, MPI_Request array_of_requests[], MPI_Status array_of_statuses[])
 
static double wtime (void)
 
static int activeWaitAny (const int count, MPI_Request array_of_requests[], std::function< void(int) > func)
 
static int activeWaitSome (const int count, MPI_Request array_of_requests[], std::function< void(int) > func)
 
static int activeWaitSomePartialPhase (const int participants, std::vector< std::function< MPI_Request(int) > > const &phases)
 
static int activeWaitSomeCompletePhase (const int participants, std::vector< std::function< MPI_Request(int) > > const &phases)
 
static int activeWaitOrderedCompletePhase (const int participants, std::vector< std::function< MPI_Request(int) > > const &phases)
 

Detailed Description

This struct is a wrapper for all mpi.h functions that are used in GEOSX, and provides a collection of convenience functions to make using the raw mpi functions simpler.

The static wrapper functions around the mpi.h functions are named by removing the "MPI_" prefix from the native MPI function name and converting the remainder to camelCase; for instance, "commRank()" calls "MPI_Comm_rank()". Since all wrapper functions are static, they should be referred to by their scoped name, for example "MpiWrapper::commRank()".

Definition at line 108 of file MpiWrapper.hpp.
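
A minimal usage sketch of the naming convention (the include path and the MPI_COMM_GEOSX default are assumptions based on this page):

    #include "common/MpiWrapper.hpp"

    using namespace geosx;

    int const rank = MpiWrapper::commRank();   // wraps MPI_Comm_rank() on MPI_COMM_GEOSX
    int const size = MpiWrapper::commSize();   // wraps MPI_Comm_size() on MPI_COMM_GEOSX
    MpiWrapper::barrier();                     // wraps MPI_Barrier()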

Member Enumeration Documentation

◆ Reduction

Strongly typed enum class for calling collective functions using MPI_Op

Enumerator
Max 

Max.

Min 

Min.

Sum 

Sum.

Prod 

Prod.

Definition at line 116 of file MpiWrapper.hpp.
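
For illustration, a hedged sketch of using this enum with the reduce() convenience function documented below (computeLocalError() is a hypothetical local computation):

    double const localError = computeLocalError();
    // Reduce across all ranks using the strongly typed enum instead of a raw MPI_Op.
    double const globalMax = MpiWrapper::reduce( localError, MpiWrapper::Reduction::Max );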

Member Function Documentation

◆ activeWaitAny()

static int geosx::MpiWrapper::activeWaitAny ( const int  count,
MPI_Request  array_of_requests[],
std::function< void(int) >  func 
)
static

Wait on MPI_Requests to complete one at a time and trigger a callback to process each completion.

Parameters
[in] count  The number of MPI_Requests being processed.
[in,out] array_of_requests  The MPI_Requests to actively wait on.
[in] func  A callable object accepting an integer denoting the index of the MPI_Request that has completed.
Returns
MPI_SUCCESS or an MPI_ERROR returned by internal calls to MPI_WaitAny.
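
A hedged sketch of how this might be used (numNeighbors, recvBuffers, counts, neighborRanks, and unpack are hypothetical):

    int const tag = 0;
    std::vector< MPI_Request > requests( numNeighbors );
    for( int n = 0; n < numNeighbors; ++n )
    {
      MpiWrapper::iRecv( recvBuffers[n].data(), counts[n], neighborRanks[n], tag,
                         MPI_COMM_GEOSX, &requests[n] );
    }
    // Unpack each message as soon as its request completes, in completion order.
    MpiWrapper::activeWaitAny( numNeighbors, requests.data(),
                               [&]( int idx ) { unpack( recvBuffers[idx] ); } );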

◆ activeWaitOrderedCompletePhase()

static int geosx::MpiWrapper::activeWaitOrderedCompletePhase ( const int  participants,
std::vector< std::function< MPI_Request(int) > > const &  phases 
)
static

Active blocking phased communication with multiple participants, each participant in each phase may depend on the previous phases being fully complete prior to entry into a subsequent phase.

Parameters
[in] participants  The number of participants in each phase.
[in] phases  A vector of function objects, each taking an int and returning an MPI_Request denoting the state of that participant in that phase.
Note
The restriction on phase[N](index) being called is that phase[N-1](0 through participants-1) and phase[N](0 through index-1) have all been called and the MPI_Requests from those calls have all been completed.
One can add a final recv phase by having that phase return MPI_REQUEST_NULL.
Returns
MPI_SUCCESS or an MPI_ERROR from internal calls to MPI_WaitAny.

◆ activeWaitSome()

static int geosx::MpiWrapper::activeWaitSome ( const int  count,
MPI_Request  array_of_requests[],
std::function< void(int) >  func 
)
static

Wait on MPI_Requests to complete one or more at a time and trigger a callback to process each completion.

Parameters
[in] count  The number of MPI_Requests being processed.
[in,out] array_of_requests  The MPI_Requests to actively wait on.
[in] func  A callable object accepting an integer denoting the index of the MPI_Request that has completed.
Returns
MPI_SUCCESS or an MPI_ERROR returned by internal calls to MPI_WaitSome.

◆ activeWaitSomeCompletePhase()

static int geosx::MpiWrapper::activeWaitSomeCompletePhase ( const int  participants,
std::vector< std::function< MPI_Request(int) > > const &  phases 
)
static

Active non-blocking phased communication with multiple participants, each participant in each phase may depend on the previous phases being fully complete prior to entry into a subsequent phase.

Parameters
[in] participants  The number of participants in each phase.
[in] phases  A vector of function objects, each taking an int and returning an MPI_Request denoting the state of that participant in that phase.
Note
The restriction on phase[N](index) being called is that phase[N-1](0 through participants-1) have all been called and the MPI_Requests from those calls have all been completed.
One can add a final recv phase by having that phase return MPI_REQUEST_NULL.
Returns
MPI_SUCCESS or an MPI_ERROR from internal calls to MPI_WaitAny.

◆ activeWaitSomePartialPhase()

static int geosx::MpiWrapper::activeWaitSomePartialPhase ( const int  participants,
std::vector< std::function< MPI_Request(int) > > const &  phases 
)
static

Active non-blocking phased communication with multiple participants, each participant in each phase cannot depend on the previous phases being complete for any participant other than itself.

Parameters
[in] participants  The number of participants in each phase.
[in] phases  A vector of function objects, each taking an int and returning an MPI_Request denoting the state of that participant in that phase.
Note
The only restriction on phase[N](index) being called is that phase[N-1](index) has been called and the MPI_Request returned by that call has completed.
One can add a final recv phase by having that phase return MPI_REQUEST_NULL.
Returns
MPI_SUCCESS or an MPI_ERROR from internal calls to MPI_WaitAny.
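
A hedged sketch of a two-phase exchange (postSend and postRecv are hypothetical helpers that return the MPI_Request of a nonblocking call for participant p):

    std::vector< std::function< MPI_Request( int ) > > phases =
    {
      []( int p ) { return postSend( p ); },   // phase 0 for participant p
      []( int p ) { return postRecv( p ); }    // phase 1, entered once p's own phase-0 request completes
    };
    MpiWrapper::activeWaitSomePartialPhase( numParticipants, phases );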

◆ allgather()

template<typename T_SEND , typename T_RECV >
static int geosx::MpiWrapper::allgather ( T_SEND const *  sendbuf,
int  sendcount,
T_RECV *  recvbuf,
int  recvcount,
MPI_Comm  comm 
)
static

Strongly typed wrapper around MPI_Allgather.

Template Parameters
T_SEND  The pointer type for sendbuf
T_RECV  The pointer type for recvbuf
Parameters
[in] sendbuf  The pointer to the sending buffer.
[in] sendcount  The number of values to send.
[out] recvbuf  The pointer to the receive buffer.
[in] recvcount  The number of values to receive.
[in] comm  The MPI_Comm over which the gather operates.
Returns
The return value of the underlying call to MPI_Allgather().
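
For example, gathering one int from every rank (a minimal sketch):

    int const myRank = MpiWrapper::commRank( comm );
    std::vector< int > allRanks( MpiWrapper::commSize( comm ) );
    MpiWrapper::allgather( &myRank, 1, allRanks.data(), 1, comm );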

◆ allGather()

template<typename T >
static void geosx::MpiWrapper::allGather ( T const  myValue,
array1d< T > &  allValues,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience function for MPI_Allgather.

Template Parameters
T  The type to send/receive. This must have a valid conversion to MPI_Datatype in getMpiType().
Parameters
[in] myValue  The value to send.
[out] allValues  The values received from each rank.
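
A short sketch (localIndex is assumed from GEOSX's common type aliases; numLocalElements() is hypothetical; allValues is presumably resized by the call):

    localIndex const myCount = numLocalElements();
    array1d< localIndex > allCounts;
    MpiWrapper::allGather( myCount, allCounts );   // one entry per rank afterwards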

◆ allReduce()

template<typename T >
static int geosx::MpiWrapper::allReduce ( T const *  sendbuf,
T *  recvbuf,
int  count,
MPI_Op  op,
MPI_Comm  comm 
)
static

Strongly typed wrapper around MPI_Allreduce.

Parameters
[in] sendbuf  The pointer to the sending buffer.
[out] recvbuf  The pointer to the receive buffer.
[in] count  The number of values to send/receive.
[in] op  The MPI_Op to perform.
[in] comm  The MPI_Comm over which the reduction operates.
Returns
The return value of the underlying call to MPI_Allreduce().
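
For example, a global sum visible on every rank (computeLocalResidual() is hypothetical):

    double const localSum = computeLocalResidual();
    double globalSum = 0.0;
    MpiWrapper::allReduce( &localSum, &globalSum, 1, MPI_SUM, comm );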

◆ bcast()

template<typename T >
static int geosx::MpiWrapper::bcast ( T *  buffer,
int  count,
int  root,
MPI_Comm  comm 
)
static

Strongly typed wrapper around MPI_Bcast.

Parameters
[in,out] buffer  The pointer to the buffer to broadcast.
[in] count  The number of values in buffer.
[in] root  The rank broadcasting the data.
[in] comm  The MPI_Comm over which the broadcast operates.

◆ broadcast()

template<typename T >
static void geosx::MpiWrapper::broadcast ( T &  value,
int  srcRank = 0,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience function for MPI_Broadcast.

Template Parameters
T  The type to send/receive. This must have a valid conversion to MPI_Datatype in getMpiType().
Parameters
[in,out] value  The value broadcast from srcRank and received on all other ranks.
[in] srcRank  The rank broadcasting the value.
[in] comm  The MPI_Comm over which the broadcast operates.

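A hedged sketch broadcasting a run-time value from rank 0 (parseInputName() is hypothetical; the string specialization documented above handles the variable length):

    string inputFileName;
    if( MpiWrapper::commRank() == 0 )
    {
      inputFileName = parseInputName();
    }
    MpiWrapper::broadcast( inputFileName, 0 );   // all ranks hold the value afterwards
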
◆ check()

static int geosx::MpiWrapper::check ( MPI_Request *  request,
int *  flag,
MPI_Status *  status 
)
static

The same as test but doesn't deallocate requests regardless of their source.

Parameters
[in] request  The MPI_Request to check for completion.
[out] flag  Whether the request has completed or not.
[out] status  The current status of the request.
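
A minimal polling sketch that overlaps local work with communication (postedRecv() and doLocalWork() are hypothetical):

    MPI_Request request = postedRecv();   // an MPI_Request from an earlier nonblocking call
    int flag = 0;
    MPI_Status status;
    while( !flag )
    {
      MpiWrapper::check( &request, &flag, &status );   // unlike test(), does not free the request
      doLocalWork();                                   // work overlapped with communication
    }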

◆ checkAll()

static int geosx::MpiWrapper::checkAll ( int  count,
MPI_Request  array_of_requests[],
int *  flag,
MPI_Status  array_of_statuses[] 
)
static

The same as testAll but doesn't deallocate requests regardless of their source.

Note
Since this doesn't deallocate requests or set them to MPI_REQUEST_NULL, repeated calls with a set of already-completed requests will continue to report them as complete.
Parameters
[in] count  The number of requests in the array to check.
[in] array_of_requests  The MPI_Requests to check for completion.
[out] flag  Whether all requests have completed or not.
[out] array_of_statuses  The current status of all requests.

◆ checkAny()

static int geosx::MpiWrapper::checkAny ( int  count,
MPI_Request  array_of_requests[],
int *  idx,
int *  flag,
MPI_Status  array_of_statuses[] 
)
static

The same as testAny but doesn't deallocate requests regardless of their source.

Note
Since this doesn't deallocate requests or set them to MPI_REQUEST_NULL, repeated calls with the same set of requests will return the first completed request each time. Thus, if the first request is complete, it will always be returned unless the user deallocates or overwrites that request with MPI_REQUEST_NULL.
Parameters
[in] count  The number of requests in the array to check.
[in] array_of_requests  The MPI_Requests to check for completion.
[out] idx  The index of the first request in the array that is complete.
[out] flag  Whether a request has completed or not.
[out] array_of_statuses  The current status of all requests.

◆ gather()

template<typename TS , typename TR >
static int geosx::MpiWrapper::gather ( TS const *const  sendbuf,
int  sendcount,
TR *const  recvbuf,
int  recvcount,
int  root,
MPI_Comm  comm 
)
static

Strongly typed wrapper around MPI_Gather().

Template Parameters
TS  The pointer type for sendbuf
TR  The pointer type for recvbuf
Parameters
[in] sendbuf  The pointer to the sending buffer.
[in] sendcount  The number of values to send.
[out] recvbuf  The pointer to the receive buffer.
[in] recvcount  The number of values to receive.
[in] root  The rank receiving the data.
[in] comm  The MPI_Comm over which the gather operates.
Returns
The return value of the underlying call to MPI_Gather().

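For example, collecting per-rank counts on the root (localData is hypothetical):

    int const localCount = static_cast< int >( localData.size() );
    std::vector< int > counts( MpiWrapper::commSize( comm ) );
    MpiWrapper::gather( &localCount, 1, counts.data(), 1, 0, comm );   // counts meaningful on root only
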
◆ gatherv()

template<typename TS , typename TR >
static int geosx::MpiWrapper::gatherv ( TS const *const  sendbuf,
int  sendcount,
TR *const  recvbuf,
const int *  recvcounts,
const int *  displs,
int  root,
MPI_Comm  comm 
)
static

Strongly typed wrapper around MPI_Gatherv.

Template Parameters
TS  The pointer type for sendbuf
TR  The pointer type for recvbuf
Parameters
[in] sendbuf  The pointer to the sending buffer.
[in] sendcount  The number of values to send.
[out] recvbuf  The pointer to the receive buffer.
[in] recvcounts  Integer array (of length group size) giving the number of values to receive from each rank (significant only at root).
[in] displs  Integer array (of length group size). Entry i specifies the displacement relative to recvbuf at which to place the incoming data from process i (significant only at root).
[in] root  The rank receiving the data.
[in] comm  The MPI_Comm over which the gather operates.
Returns
The return value of the underlying call to MPI_Gatherv().

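Continuing the gather() sketch above, the root can assemble variable-length contributions:

    int const root = 0;
    std::vector< int > displs( counts.size(), 0 );
    std::vector< double > gathered;
    if( MpiWrapper::commRank( comm ) == root )
    {
      for( std::size_t i = 1; i < counts.size(); ++i )
      {
        displs[i] = displs[i-1] + counts[i-1];   // exclusive prefix sum of the counts
      }
      gathered.resize( displs.back() + counts.back() );
    }
    MpiWrapper::gatherv( localData.data(), localCount, gathered.data(),
                         counts.data(), displs.data(), root, comm );
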
◆ getMpiOp()

MPI_Op geosx::MpiWrapper::getMpiOp ( Reduction const  op)
inlinestatic

Returns an MPI_Op associated with our strongly typed Reduction enum.

Parameters
[in] op  The value of the Reduction enum to get an MPI_Op for.
Returns
The MPI_Op associated with op.

Definition at line 572 of file MpiWrapper.hpp.

◆ getMpiType()

template<typename T >
static MPI_Datatype geosx::MpiWrapper::getMpiType ( )
static

Returns an MPI_Datatype from a c type.

Template Parameters
T  The type for which we want an MPI_Datatype
Returns
The MPI_Datatype associated with T.

◆ iRecv()

template<typename T >
static int geosx::MpiWrapper::iRecv ( T *const  buf,
int  count,
int  source,
int  tag,
MPI_Comm  comm,
MPI_Request *  request 
)
static

Strongly typed wrapper around MPI_Irecv()

Parameters
[out] buf  The pointer to the buffer into which the data will be received.
[in] count  The number of elements in buf.
[in] source  The rank of the source process within comm.
[in] tag  The message tag that is used to distinguish different types of messages.
[in] comm  The handle to the MPI_Comm.
[out] request  Pointer to the MPI_Request associated with this receive.
Returns
The return value of the underlying call to MPI_Irecv().

◆ iSend()

template<typename T >
static int geosx::MpiWrapper::iSend ( T const *const  buf,
int  count,
int  dest,
int  tag,
MPI_Comm  comm,
MPI_Request *  request 
)
static

Strongly typed wrapper around MPI_Isend()

Parameters
[in] buf  The pointer to the buffer that contains the data to be sent.
[in] count  The number of elements in buf.
[in] dest  The rank of the destination process within comm.
[in] tag  The message tag that is used to distinguish different types of messages.
[in] comm  The handle to the MPI_Comm.
[out] request  Pointer to the MPI_Request associated with this send.
Returns
The return value of the underlying call to MPI_Isend().

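A minimal send/receive pair completed with waitAll() (assumes exactly two ranks in comm):

    double sendVal = 3.14;
    double recvVal = 0.0;
    int const partner = 1 - MpiWrapper::commRank( comm );
    MPI_Request requests[2];
    MpiWrapper::iSend( &sendVal, 1, partner, 0, comm, &requests[0] );
    MpiWrapper::iRecv( &recvVal, 1, partner, 0, comm, &requests[1] );
    MPI_Status statuses[2];
    MpiWrapper::waitAll( 2, requests, statuses );
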
◆ max()

template<typename T >
T geosx::MpiWrapper::max ( T const &  value,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience function for a MPI_Reduce using a MPI_MAX operation.

Parameters
[in] value  the value to send into the reduction.
Returns
The maximum of value across all ranks.

Definition at line 946 of file MpiWrapper.hpp.

◆ min()

template<typename T >
T geosx::MpiWrapper::min ( T const &  value,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience function for a MPI_Reduce using a MPI_MIN operation.

Parameters
[in] value  the value to send into the reduction.
Returns
The minimum of value across all ranks.

Definition at line 940 of file MpiWrapper.hpp.

◆ prefixSum()

template<typename U , typename T >
U geosx::MpiWrapper::prefixSum ( T const  value,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Compute exclusive prefix sum and full sum.

Template Parameters
T  type of local (rank) value
U  type of global (sum) value
Parameters
[in] value  the local value
Returns
a pair where first is the prefix sum, second is the full sum

Definition at line 905 of file MpiWrapper.hpp.
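
A hedged sketch of computing a rank's starting global index (numLocalRows is hypothetical; globalIndex is assumed from GEOSX's common type aliases):

    localIndex const numLocalRows = 100;
    auto const offset = MpiWrapper::prefixSum< globalIndex >( numLocalRows );
    // offset is the count contributed by all lower-numbered ranks.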

◆ reduce()

template<typename T >
static T geosx::MpiWrapper::reduce ( T const &  value,
Reduction const  op,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience function for the MPI_Reduce function.

Template Parameters
Thetype of data to reduce. Must be a valid MPI_Datatype.
Parameters
valueThe value to send to the reduction.
opThe Reduction enum to perform.
Returns
The value of reduction across all ranks

◆ sum()

template<typename T >
T geosx::MpiWrapper::sum ( T const &  value,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience function for a MPI_Reduce using a MPI_SUM operation.

Parameters
[in] value  the value to send into the reduction.
Returns
The sum of value across all ranks.

Definition at line 934 of file MpiWrapper.hpp.


The documentation for this struct was generated from the following file: MpiWrapper.hpp