GEOSX
geos::MpiWrapper Struct Reference

#include <MpiWrapper.hpp>

Public Types

enum  Reduction { Reduction::Max, Reduction::Min, Reduction::Sum, Reduction::Prod }
 

Public Member Functions

template<typename T_SEND , typename T_RECV >
int allgather (T_SEND const *const sendbuf, int sendcount, T_RECV *const recvbuf, int recvcount, MPI_Comm MPI_PARAM(comm))
 
template<typename T_SEND , typename T_RECV >
int allgatherv (T_SEND const *const sendbuf, int sendcount, T_RECV *const recvbuf, int *recvcounts, int *displacements, MPI_Comm MPI_PARAM(comm))
 
template<typename T >
void allGather (T const myValue, array1d< T > &allValues, MPI_Comm MPI_PARAM(comm))
 
template<typename T >
int allGather (arrayView1d< T const > const &sendValues, array1d< T > &allValues, MPI_Comm MPI_PARAM(comm))
 
template<typename T >
int allReduce (T const *const sendbuf, T *const recvbuf, int const count, MPI_Op const MPI_PARAM(op), MPI_Comm const MPI_PARAM(comm))
 
template<typename T >
int reduce (T const *const sendbuf, T *const recvbuf, int const count, MPI_Op const MPI_PARAM(op), int root, MPI_Comm const MPI_PARAM(comm))
 
template<typename T >
int scan (T const *const sendbuf, T *const recvbuf, int count, MPI_Op MPI_PARAM(op), MPI_Comm MPI_PARAM(comm))
 
template<typename T >
int exscan (T const *const MPI_PARAM(sendbuf), T *const recvbuf, int count, MPI_Op MPI_PARAM(op), MPI_Comm MPI_PARAM(comm))
 
template<typename T >
int bcast (T *const MPI_PARAM(buffer), int MPI_PARAM(count), int MPI_PARAM(root), MPI_Comm MPI_PARAM(comm))
 
template<typename T >
void broadcast (T &MPI_PARAM(value), int MPI_PARAM(srcRank), MPI_Comm MPI_PARAM(comm))
 
template<typename TS , typename TR >
int gather (TS const *const sendbuf, int sendcount, TR *const recvbuf, int recvcount, int MPI_PARAM(root), MPI_Comm MPI_PARAM(comm))
 
template<typename TS , typename TR >
int gatherv (TS const *const sendbuf, int sendcount, TR *const recvbuf, const int *recvcounts, const int *MPI_PARAM(displs), int MPI_PARAM(root), MPI_Comm MPI_PARAM(comm))
 
template<typename T >
int iRecv (T *const buf, int count, int MPI_PARAM(source), int tag, MPI_Comm MPI_PARAM(comm), MPI_Request *MPI_PARAM(request))
 
template<typename T >
int iSend (T const *const buf, int count, int MPI_PARAM(dest), int tag, MPI_Comm MPI_PARAM(comm), MPI_Request *MPI_PARAM(request))
 

Static Public Member Functions

static std::map< int, std::pair< int, void * > > & getTagToPointersMap ()
 
static int nodeCommSize ()
 Compute the number of ranks allocated on the same node. More...
 
template<typename T_SEND , typename T_RECV >
static int allgather (T_SEND const *sendbuf, int sendcount, T_RECV *recvbuf, int recvcount, MPI_Comm comm)
 Strongly typed wrapper around MPI_Allgather. More...
 
template<typename T_SEND , typename T_RECV >
static int allgatherv (T_SEND const *sendbuf, int sendcount, T_RECV *recvbuf, int *recvcounts, int *displacements, MPI_Comm comm)
 Strongly typed wrapper around MPI_Allgatherv. More...
 
template<typename T >
static void allGather (T const myValue, array1d< T > &allValues, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience function for MPI_Allgather. More...
 
template<typename T >
static int allGather (arrayView1d< T const > const &sendbuf, array1d< T > &recvbuf, MPI_Comm comm=MPI_COMM_GEOSX)
 
template<typename T >
static int allReduce (T const *sendbuf, T *recvbuf, int count, MPI_Op op, MPI_Comm comm=MPI_COMM_GEOSX)
 Strongly typed wrapper around MPI_Allreduce. More...
 
template<typename T >
static T allReduce (T const &value, Reduction const op, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience wrapper for the MPI_Allreduce function. More...
 
template<typename T >
static void allReduce (Span< T const > src, Span< T > dst, Reduction const op, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience wrapper for the MPI_Allreduce function. Version for sequences. More...
 
template<typename T >
static int reduce (T const *sendbuf, T *recvbuf, int count, MPI_Op op, int root, MPI_Comm comm=MPI_COMM_GEOSX)
 Strongly typed wrapper around MPI_Reduce. More...
 
template<typename T >
static T reduce (T const &value, Reduction const op, int root, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience wrapper for the MPI_Reduce function. More...
 
template<typename T >
static void reduce (Span< T const > src, Span< T > dst, Reduction const op, int root, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience wrapper for the MPI_Reduce function. Version for sequences. More...
 
template<typename T >
static int scan (T const *sendbuf, T *recvbuf, int count, MPI_Op op, MPI_Comm comm)
 
template<typename T >
static int exscan (T const *sendbuf, T *recvbuf, int count, MPI_Op op, MPI_Comm comm)
 
template<typename T >
static int bcast (T *buffer, int count, int root, MPI_Comm comm)
 Strongly typed wrapper around MPI_Bcast. More...
 
template<typename T >
static void broadcast (T &value, int srcRank=0, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience function for MPI_Broadcast. More...
 
template<typename TS , typename TR >
static int gather (TS const *const sendbuf, int sendcount, TR *const recvbuf, int recvcount, int root, MPI_Comm comm)
 Strongly typed wrapper around MPI_Gather(). More...
 
template<typename TS , typename TR >
static int gatherv (TS const *const sendbuf, int sendcount, TR *const recvbuf, const int *recvcounts, const int *displs, int root, MPI_Comm comm)
 Strongly typed wrapper around MPI_Gatherv. More...
 
static MPI_Op getMpiOp (Reduction const op)
 Returns an MPI_Op associated with our strongly typed Reduction enum. More...
 
template<typename T >
static int recv (array1d< T > &buf, int MPI_PARAM(source), int tag, MPI_Comm MPI_PARAM(comm), MPI_Status *MPI_PARAM(request))
 
template<typename T >
static int iSend (arrayView1d< T > const &buf, int MPI_PARAM(dest), int tag, MPI_Comm MPI_PARAM(comm), MPI_Request *MPI_PARAM(request))
 
template<typename T >
static int iRecv (T *const buf, int count, int source, int tag, MPI_Comm comm, MPI_Request *request)
 Strongly typed wrapper around MPI_Irecv() More...
 
template<typename T >
static int iSend (T const *const buf, int count, int dest, int tag, MPI_Comm comm, MPI_Request *request)
 Strongly typed wrapper around MPI_Isend() More...
 
template<typename U , typename T >
static U prefixSum (T const value, MPI_Comm comm=MPI_COMM_GEOSX)
 Compute exclusive prefix sum and full sum. More...
 
template<typename T >
static T sum (T const &value, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience function for a MPI_Allreduce using a MPI_SUM operation. More...
 
template<typename T >
static void sum (Span< T const > src, Span< T > dst, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience function for a MPI_Allreduce using a MPI_SUM operation. More...
 
template<typename T >
static T min (T const &value, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience function for a MPI_Allreduce using a MPI_MIN operation. More...
 
template<typename T >
static void min (Span< T const > src, Span< T > dst, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience function for a MPI_Allreduce using a MPI_MIN operation. More...
 
template<typename T >
static T max (T const &value, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience function for a MPI_Allreduce using a MPI_MAX operation. More...
 
template<typename T >
static void max (Span< T const > src, Span< T > dst, MPI_Comm comm=MPI_COMM_GEOSX)
 Convenience function for a MPI_Allreduce using a MPI_MAX operation. More...
 
template<>
void broadcast (string &MPI_PARAM(value), int MPI_PARAM(srcRank), MPI_Comm MPI_PARAM(comm))
 
FUNCTION GROUP for the direct wrappers around native MPI functions
Parameters
[in]  sendbuf  Pointer to the memory to read the sent data from.
[out]  recvbuf  Pointer to the memory to write the received data in.
[in]  count  The number of data entries that are being communicated.
[in]  datatype  The MPI_Datatype that is being communicated.
[in]  op  The collective MPI_Op to apply for the function.
[in]  comm  The MPI_Comm communicator that the function will act on.

Please see the standard MPI documentation for a detailed description of the parameters for each function that is being wrapped.

static void barrier (MPI_Comm const &MPI_PARAM(comm)=MPI_COMM_GEOSX)
 
static int cartCoords (MPI_Comm comm, int rank, int maxdims, int coords[])
 
static int cartCreate (MPI_Comm comm_old, int ndims, const int dims[], const int periods[], int reorder, MPI_Comm *comm_cart)
 
static int cartRank (MPI_Comm comm, const int coords[])
 
static void commFree (MPI_Comm &comm)
 
static int commRank (MPI_Comm const &MPI_PARAM(comm)=MPI_COMM_GEOSX)
 
static int commSize (MPI_Comm const &MPI_PARAM(comm)=MPI_COMM_GEOSX)
 
static bool commCompare (MPI_Comm const &comm1, MPI_Comm const &comm2)
 
static bool initialized ()
 
static int init (int *argc, char ***argv)
 
static void finalize ()
 
static MPI_Comm commDup (MPI_Comm const comm)
 
static MPI_Comm commSplit (MPI_Comm const comm, int color, int key)
 
static int test (MPI_Request *request, int *flag, MPI_Status *status)
 
static int testAny (int count, MPI_Request array_of_requests[], int *idx, int *flags, MPI_Status array_of_statuses[])
 
static int testSome (int count, MPI_Request array_of_requests[], int *outcount, int array_of_indices[], MPI_Status array_of_statuses[])
 
static int testAll (int count, MPI_Request array_of_requests[], int *flags, MPI_Status array_of_statuses[])
 
static int check (MPI_Request *request, int *flag, MPI_Status *status)
 
static int checkAny (int count, MPI_Request array_of_requests[], int *idx, int *flag, MPI_Status array_of_statuses[])
 
static int checkAll (int count, MPI_Request array_of_requests[], int *flag, MPI_Status array_of_statuses[])
 
static int wait (MPI_Request *request, MPI_Status *status)
 
static int waitAny (int count, MPI_Request array_of_requests[], int *indx, MPI_Status array_of_statuses[])
 
static int waitSome (int count, MPI_Request array_of_requests[], int *outcount, int array_of_indices[], MPI_Status array_of_statuses[])
 
static int waitAll (int count, MPI_Request array_of_requests[], MPI_Status array_of_statuses[])
 
static double wtime (void)
 
static int activeWaitAny (const int count, MPI_Request array_of_requests[], MPI_Status array_of_statuses[], std::function< MPI_Request(int) > func)
 
static int activeWaitSome (const int count, MPI_Request array_of_requests[], MPI_Status array_of_statuses[], std::function< MPI_Request(int) > func)
 
static int activeWaitSomeCompletePhase (const int participants, std::vector< std::tuple< MPI_Request *, MPI_Status *, std::function< MPI_Request(int) > > > const &phases)
 
static int activeWaitOrderedCompletePhase (const int participants, std::vector< std::tuple< MPI_Request *, MPI_Status *, std::function< MPI_Request(int) > > > const &phases)
 

Detailed Description

This struct is a wrapper for all mpi.h functions that are used in GEOSX, and provides a collection of convenience functions to make using the raw mpi functions simpler.

The static wrapper functions around the mpi.h functions are named by removing the "MPI_" prefix from the native MPI function name. For instance, the "Comm_rank()" wrapper calls "MPI_Comm_rank()". Since all wrapper functions are static, they should be referred to by their scoped name, for example "MpiWrapper::commRank()".

Definition at line 108 of file MpiWrapper.hpp.
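
A minimal usage sketch (the include path and printf reporting are illustrative, not prescribed by GEOSX):

#include <cstdio>
#include "common/MpiWrapper.hpp"   // illustrative include path

void reportCommSize()
{
  // commRank()/commSize() wrap MPI_Comm_rank()/MPI_Comm_size(),
  // defaulting to MPI_COMM_GEOSX.
  int const rank = geos::MpiWrapper::commRank();
  int const size = geos::MpiWrapper::commSize();
  if( rank == 0 )
  {
    std::printf( "running on %d ranks\n", size );
  }
}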

Member Enumeration Documentation

◆ Reduction

Strongly typed enum class for calling collective functions using MPI_Op

Enumerator
Max 

Max.

Min 

Min.

Sum 

Sum.

Prod 

Prod.

Definition at line 116 of file MpiWrapper.hpp.

Member Function Documentation

◆ activeWaitAny()

static int geos::MpiWrapper::activeWaitAny ( const int  count,
MPI_Request  array_of_requests[],
MPI_Status  array_of_statuses[],
std::function< MPI_Request(int) >  func 
)
static

Wait on MPI_Requests to complete one at a time and trigger a callback to process the completion.

Parameters
[in]  count  The number of MPI_Requests being processed.
[in,out]  array_of_requests  The MPI_Requests to actively wait on.
[in]  func  A callable object accepting an integer denoting the MPI_Request index which has completed.
Returns
MPI_SUCCESS or an MPI_ERROR returned by internal calls to MPI_WaitAny.
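
A hedged usage sketch: post one non-blocking receive per peer, then let activeWaitAny invoke a callback as each receive completes. The names peers, buffers, and unpack are illustrative assumptions, not GEOSX API:

void receiveFromPeers( std::vector< int > const & peers,
                       std::vector< std::vector< double > > & buffers )
{
  int const n = static_cast< int >( peers.size() );
  std::vector< MPI_Request > requests( n );
  std::vector< MPI_Status > statuses( n );
  for( int i = 0; i < n; ++i )
  {
    geos::MpiWrapper::iRecv( buffers[i].data(), static_cast< int >( buffers[i].size() ),
                             peers[i], 0, MPI_COMM_GEOSX, &requests[i] );
  }
  geos::MpiWrapper::activeWaitAny( n, requests.data(), statuses.data(),
                                   [&buffers]( int const idx ) -> MPI_Request
  {
    unpack( buffers[idx] );     // hypothetical per-message post-processing
    return MPI_REQUEST_NULL;    // no follow-up request for this slot
  } );
}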

◆ activeWaitOrderedCompletePhase()

static int geos::MpiWrapper::activeWaitOrderedCompletePhase ( const int  participants,
std::vector< std::tuple< MPI_Request *, MPI_Status *, std::function< MPI_Request(int) > > > const &  phases 
)
static

Active blocking phased communication with multiple participants; each participant in each phase may depend on the previous phases being fully complete prior to entry into a subsequent phase.

Parameters
[in]  participants  The number of participants in each phase.
[in]  phases  A vector of function objects taking an int and returning an MPI_Request, denoting the state of that participant in that phase.
Note
The restriction on calling phase[N](index) is that phase[N-1](0 through participants-1) and phase[N](0 through index-1) have all been called and the MPI_Requests from those calls have all been completed.
One can add a final recv phase by having that phase return MPI_REQUEST_NULL.
Returns
MPI_SUCCESS or an MPI_ERROR from internal calls to MPI_WaitAny.

◆ activeWaitSome()

static int geos::MpiWrapper::activeWaitSome ( const int  count,
MPI_Request  array_of_requests[],
MPI_Status  array_of_statuses[],
std::function< MPI_Request(int) >  func 
)
static

Wait on MPI_Requests to complete one or more at a time and trigger a callback to process the completion.

Parameters
[in]  count  The number of MPI_Requests being processed.
[in,out]  array_of_requests  The MPI_Requests to actively wait on.
[in]  func  A callable object accepting an integer denoting the MPI_Request index which has completed.
Returns
MPI_SUCCESS or an MPI_ERROR returned by internal calls to MPI_WaitSome.

◆ activeWaitSomeCompletePhase()

static int geos::MpiWrapper::activeWaitSomeCompletePhase ( const int  participants,
std::vector< std::tuple< MPI_Request *, MPI_Status *, std::function< MPI_Request(int) > > > const &  phases 
)
static

Active non-blocking phased communication with multiple participants; each participant in each phase may depend on the previous phases being fully complete prior to entry into a subsequent phase.

Parameters
[in]  participants  The number of participants in each phase.
[in]  phases  A vector of function objects taking an int and returning an MPI_Request, denoting the state of that participant in that phase.
Note
The restriction on calling phase[N](index) is that phase[N-1](0 through participants-1) have all been called and the MPI_Requests from those calls have all been completed.
One can add a final recv phase by having that phase return MPI_REQUEST_NULL.
Returns
MPI_SUCCESS or an MPI_ERROR from internal calls to MPI_WaitAny.

◆ allGather()

template<typename T >
static void geos::MpiWrapper::allGather ( T const  myValue,
array1d< T > &  allValues,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience function for MPI_Allgather.

Template Parameters
T  The type to send/receive. This must have a valid conversion to MPI_Datatype in getMpiType().
Parameters
[in]  myValue  The value to send.
[out]  allValues  The values received from each rank.
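
A hedged usage sketch (assuming the wrapper sizes allValues to the communicator size):

geos::array1d< int > allRanks;
geos::MpiWrapper::allGather( geos::MpiWrapper::commRank(), allRanks );
// allRanks now holds one entry per rank; allRanks[i] == i.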

◆ allgather()

template<typename T_SEND , typename T_RECV >
static int geos::MpiWrapper::allgather ( T_SEND const *  sendbuf,
int  sendcount,
T_RECV *  recvbuf,
int  recvcount,
MPI_Comm  comm 
)
static

Strongly typed wrapper around MPI_Allgather.

Template Parameters
T_SEND  The pointer type for sendbuf
T_RECV  The pointer type for recvbuf
Parameters
[in]  sendbuf  The pointer to the sending buffer.
[in]  sendcount  The number of values to send.
[out]  recvbuf  The pointer to the receive buffer.
[in]  recvcount  The number of values to receive.
[in]  comm  The MPI_Comm over which the gather operates.
Returns
The return value of the underlying call to MPI_Allgather().

◆ allgatherv()

template<typename T_SEND , typename T_RECV >
static int geos::MpiWrapper::allgatherv ( T_SEND const *  sendbuf,
int  sendcount,
T_RECV *  recvbuf,
int *  recvcounts,
int *  displacements,
MPI_Comm  comm 
)
static

Strongly typed wrapper around MPI_Allgatherv.

Template Parameters
T_SEND  The pointer type for sendbuf
T_RECV  The pointer type for recvbuf
Parameters
[in]  sendbuf  The pointer to the sending buffer.
[in]  sendcount  The number of values to send.
[out]  recvbuf  The pointer to the receive buffer.
[in]  recvcounts  The number of values to receive.
[in]  displacements  An array containing the displacement to apply to the message received by each process.
[in]  comm  The MPI_Comm over which the gather operates.
Returns
The return value of the underlying call to MPI_Allgatherv().

◆ allReduce() [1/3]

template<typename T >
void geos::MpiWrapper::allReduce ( Span< T const >  src,
Span< T >  dst,
Reduction const  op,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience wrapper for the MPI_Allreduce function. Version for sequences.

Template Parameters
T  type of data to reduce. Must correspond to a valid MPI_Datatype.
Parameters
[in]  src  The values to send to the reduction.
[out]  dst  The resulting values.
op  The Reduction enum to perform.
comm  The communicator.

Definition at line 1051 of file MpiWrapper.hpp.

◆ allReduce() [2/3]

template<typename T >
T geos::MpiWrapper::allReduce ( T const &  value,
Reduction const  op,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience wrapper for the MPI_Allreduce function.

Template Parameters
T  type of data to reduce. Must correspond to a valid MPI_Datatype.
Parameters
value  The value to send to the reduction.
op  The Reduction enum to perform.
comm  The communicator.
Returns
The value of reduction across all ranks

Definition at line 1043 of file MpiWrapper.hpp.
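
A hedged usage sketch (localResidual is an illustrative variable, not GEOSX API):

double const localResidual = 0.5;   // illustrative per-rank quantity
double const globalMax =
  geos::MpiWrapper::allReduce( localResidual, geos::MpiWrapper::Reduction::Max );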

◆ allReduce() [3/3]

template<typename T >
static int geos::MpiWrapper::allReduce ( T const *  sendbuf,
T *  recvbuf,
int  count,
MPI_Op  op,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Strongly typed wrapper around MPI_Allreduce.

Parameters
[in]  sendbuf  The pointer to the sending buffer.
[out]  recvbuf  The pointer to the receive buffer.
[in]  count  The number of values to send/receive.
[in]  op  The MPI_Op to perform.
[in]  comm  The MPI_Comm over which the reduction operates.
Returns
The return value of the underlying call to MPI_Allreduce().

◆ bcast()

template<typename T >
static int geos::MpiWrapper::bcast ( T *  buffer,
int  count,
int  root,
MPI_Comm  comm 
)
static

Strongly typed wrapper around MPI_Bcast.

Parameters
[in,out]  buffer  The pointer to the send/recv buffer.
[in]  count  The number of data types to send.
[in]  root  The rank sending the data.
[in]  comm  The MPI_Comm over which the MPI_Bcast operates.
Returns
The return value of the underlying call to MPI_Bcast().

◆ broadcast()

template<typename T >
static void geos::MpiWrapper::broadcast ( T &  value,
int  srcRank = 0,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience function for MPI_Broadcast.

Template Parameters
T  The type to send/receive. This must have a valid conversion to MPI_Type in getMpiType().
Parameters
[in,out]  value  The value to send from the srcRank to the other ranks.
srcRank  The rank that is sending the value.
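
A hedged usage sketch (readCycleCount() is a hypothetical helper):

int cycleCount = 0;
if( geos::MpiWrapper::commRank() == 0 )
{
  cycleCount = readCycleCount();   // hypothetical: only rank 0 has the data
}
geos::MpiWrapper::broadcast( cycleCount );   // srcRank defaults to 0
// All ranks now hold rank 0's cycleCount.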

◆ check()

static int geos::MpiWrapper::check ( MPI_Request *  request,
int *  flag,
MPI_Status *  status 
)
static

The same as test but doesn't deallocate requests regardless of their source.

Parameters
[in]  request  The MPI_Request to check for completion
[out]  flag  Whether the request has completed or not
[out]  status  The current status of the request

◆ checkAll()

static int geos::MpiWrapper::checkAll ( int  count,
MPI_Request  array_of_requests[],
int *  flag,
MPI_Status  array_of_statuses[] 
)
static

The same as testAll but doesn't deallocate requests regardless of their source.

Note
Since this doesn't deallocate or set requests to MPI_REQUEST_NULL, repeated calls with a set of already-completed requests will continue to report completion.
Parameters
[in]  count  The number of requests in the array to check
[in]  array_of_requests  The MPI_Requests to check for completion
[out]  flag  Whether all requests have completed or not
[out]  array_of_statuses  The current status of all requests

◆ checkAny()

static int geos::MpiWrapper::checkAny ( int  count,
MPI_Request  array_of_requests[],
int *  idx,
int *  flag,
MPI_Status  array_of_statuses[] 
)
static

The same as testAny but doesn't deallocate requests regardless of their source.

Note
Since this doesn't deallocate or set requests to MPI_REQUEST_NULL, repeated calls with the same set of requests will return the first completed request each time. Thus, if the first request is complete, it will always be returned unless the user deallocates/overwrites that request with MPI_REQUEST_NULL.
Parameters
[in]  count  The number of requests in the array to check
[in]  array_of_requests  The MPI_Requests to check for completion
[out]  idx  The index of the first request in the array that is complete
[out]  flag  Whether a request has completed or not
[out]  array_of_statuses  The current status of all requests

◆ gather()

template<typename TS , typename TR >
static int geos::MpiWrapper::gather ( TS const *const  sendbuf,
int  sendcount,
TR *const  recvbuf,
int  recvcount,
int  root,
MPI_Comm  comm 
)
static

Strongly typed wrapper around MPI_Gather().

Template Parameters
TS  The pointer type for sendbuf
TR  The pointer type for recvbuf
Parameters
[in]  sendbuf  The pointer to the sending buffer.
[in]  sendcount  The number of values to send.
[out]  recvbuf  The pointer to the receive buffer.
[in]  recvcount  The number of values to receive.
[in]  root  The rank receiving the data.
[in]  comm  The MPI_Comm over which the gather operates.
Returns
The return value of the underlying call to MPI_Gather().

◆ gatherv()

template<typename TS , typename TR >
static int geos::MpiWrapper::gatherv ( TS const *const  sendbuf,
int  sendcount,
TR *const  recvbuf,
const int *  recvcounts,
const int *  displs,
int  root,
MPI_Comm  comm 
)
static

Strongly typed wrapper around MPI_Gatherv.

Template Parameters
TS  The pointer type for sendbuf
TR  The pointer type for recvbuf
Parameters
[in]  sendbuf  The pointer to the sending buffer.
[in]  sendcount  The number of values to send.
[out]  recvbuf  The pointer to the receive buffer.
[in]  recvcounts  The number of values to receive.
[in]  displs  Integer array (of length group size). Entry i specifies the displacement relative to recvbuf at which to place the incoming data from process i (significant only at root).
[in]  root  The rank receiving the data.
[in]  comm  The MPI_Comm over which the gather operates.
Returns
The return value of the underlying call to MPI_Gatherv().
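
A hedged usage sketch of a variable-count gather to rank 0; the counts-then-payload pattern below is illustrative, not prescribed by GEOSX:

int const rank = geos::MpiWrapper::commRank();
int const size = geos::MpiWrapper::commSize();
std::vector< int > sendData( rank + 1, rank );   // rank r contributes r+1 entries
int const sendCount = static_cast< int >( sendData.size() );

// First gather the per-rank counts so root can size the receive buffer.
std::vector< int > recvCounts( size, 0 );
geos::MpiWrapper::gather( &sendCount, 1, recvCounts.data(), 1, 0, MPI_COMM_GEOSX );

// Build displacements and the receive buffer (both significant only at root).
std::vector< int > displs( size, 0 );
for( int i = 1; i < size; ++i )
{
  displs[i] = displs[i - 1] + recvCounts[i - 1];
}
std::vector< int > recvData( displs[size - 1] + recvCounts[size - 1] );
geos::MpiWrapper::gatherv( sendData.data(), sendCount, recvData.data(),
                           recvCounts.data(), displs.data(), 0, MPI_COMM_GEOSX );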

◆ getMpiOp()

MPI_Op geos::MpiWrapper::getMpiOp ( Reduction const  op)
inlinestatic

Returns an MPI_Op associated with our strongly typed Reduction enum.

Parameters
[in]  op  The value of the Reduction enum to get an MPI_Op for.
Returns
The MPI_Op associated with op.

Definition at line 644 of file MpiWrapper.hpp.
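
A hedged sketch pairing getMpiOp with the strongly typed allReduce:

double localValue = 1.0;   // illustrative per-rank quantity
double globalSum = 0.0;
MPI_Op const op = geos::MpiWrapper::getMpiOp( geos::MpiWrapper::Reduction::Sum );
geos::MpiWrapper::allReduce( &localValue, &globalSum, 1, op );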

◆ iRecv()

template<typename T >
static int geos::MpiWrapper::iRecv ( T *const  buf,
int  count,
int  source,
int  tag,
MPI_Comm  comm,
MPI_Request *  request 
)
static

Strongly typed wrapper around MPI_Irecv()

Parameters
[out]  buf  The pointer to the buffer that contains the data to be received.
[in]  count  The number of elements in buf.
[in]  source  The rank of the source process within comm.
[in]  tag  The message tag that is used to distinguish different types of messages.
[in]  comm  The handle to the MPI_Comm.
[out]  request  Pointer to the MPI_Request associated with this request.
Returns
The return value of the underlying call to MPI_Irecv().

◆ iSend()

template<typename T >
static int geos::MpiWrapper::iSend ( T const *const  buf,
int  count,
int  dest,
int  tag,
MPI_Comm  comm,
MPI_Request *  request 
)
static

Strongly typed wrapper around MPI_Isend()

Parameters
[in]  buf  The pointer to the buffer that contains the data to be sent.
[in]  count  The number of elements in buf.
[in]  dest  The rank of the destination process within comm.
[in]  tag  The message tag that is used to distinguish different types of messages.
[in]  comm  The handle to the MPI_Comm.
[out]  request  Pointer to the MPI_Request associated with this request.
Returns
The return value of the underlying call to MPI_Isend().
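
A hedged sketch of a non-blocking exchange, completed with waitAll (assumes exactly two ranks in MPI_COMM_GEOSX):

int const rank = geos::MpiWrapper::commRank();
int const peer = ( rank == 0 ) ? 1 : 0;
double sendVal = double( rank );
double recvVal = -1.0;
MPI_Request requests[2];
// Post the receive first, then the matching send; tag 0 is arbitrary here.
geos::MpiWrapper::iRecv( &recvVal, 1, peer, 0, MPI_COMM_GEOSX, &requests[0] );
geos::MpiWrapper::iSend( &sendVal, 1, peer, 0, MPI_COMM_GEOSX, &requests[1] );
MPI_Status statuses[2];
geos::MpiWrapper::waitAll( 2, requests, statuses );   // complete both requests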

◆ max() [1/2]

template<typename T >
void geos::MpiWrapper::max ( Span< T const >  src,
Span< T >  dst,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience function for a MPI_Allreduce using a MPI_MAX operation.

Parameters
[in]  src  The values to send into the reduction.
[out]  dst  The resulting values, i.e. the element-wise maximum across the ranks.

Definition at line 1088 of file MpiWrapper.hpp.

◆ max() [2/2]

template<typename T >
T geos::MpiWrapper::max ( T const &  value,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience function for a MPI_Allreduce using a MPI_MAX operation.

Parameters
[in]  value  The value to send into the reduction.
Returns
The maximum of value across all ranks.

Definition at line 1082 of file MpiWrapper.hpp.

◆ min() [1/2]

template<typename T >
void geos::MpiWrapper::min ( Span< T const >  src,
Span< T >  dst,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience function for a MPI_Allreduce using a MPI_MIN operation.

Parameters
[in]  src  The values to send into the reduction.
[out]  dst  The resulting values, i.e. the element-wise minimum across the ranks.

Definition at line 1076 of file MpiWrapper.hpp.

◆ min() [2/2]

template<typename T >
T geos::MpiWrapper::min ( T const &  value,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience function for a MPI_Allreduce using a MPI_MIN operation.

Parameters
value  The value to send into the reduction.
Returns
The minimum of value across all ranks.

Definition at line 1070 of file MpiWrapper.hpp.

◆ nodeCommSize()

static int geos::MpiWrapper::nodeCommSize ( )
static

Compute the number of ranks allocated on the same node.

Returns
The number of MPI ranks on the current node.

◆ prefixSum()

template<typename U , typename T >
U geos::MpiWrapper::prefixSum ( T const  value,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Compute exclusive prefix sum and full sum.

Template Parameters
T  type of local (rank) value
U  type of global (sum) value
Parameters
[in]  value  the local value
Returns
The exclusive prefix sum of value across the preceding ranks (0 on rank 0).

Definition at line 1024 of file MpiWrapper.hpp.
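
A hedged usage sketch (per the signature above, the return is a single value of type U):

int const numLocalElems = 100;   // illustrative per-rank count
long long const offset = geos::MpiWrapper::prefixSum< long long >( numLocalElems );
// Rank r's items occupy global indices [offset, offset + numLocalElems).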

◆ reduce() [1/3]

template<typename T >
void geos::MpiWrapper::reduce ( Span< T const >  src,
Span< T >  dst,
Reduction const  op,
int  root,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience wrapper for the MPI_Reduce function. Version for sequences.

Template Parameters
T  type of data to reduce. Must correspond to a valid MPI_Datatype.
Parameters
[in]  src  The values to send to the reduction.
[out]  dst  The resulting values (only significant at root).
op  The Reduction enum to perform.
root  The rank at which the reduced values are significant.
comm  The communicator.

Definition at line 1103 of file MpiWrapper.hpp.

◆ reduce() [2/3]

template<typename T >
T geos::MpiWrapper::reduce ( T const &  value,
Reduction const  op,
int  root,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience wrapper for the MPI_Reduce function.

Template Parameters
T  type of data to reduce. Must correspond to a valid MPI_Datatype.
Parameters
value  The value to send to the reduction.
op  The Reduction enum to perform.
root  The rank at which the result is significant.
comm  The communicator.
Returns
The value of reduction (only significant at root)

Definition at line 1095 of file MpiWrapper.hpp.

◆ reduce() [3/3]

template<typename T >
static int geos::MpiWrapper::reduce ( T const *  sendbuf,
T *  recvbuf,
int  count,
MPI_Op  op,
int  root,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Strongly typed wrapper around MPI_Reduce.

Parameters
[in]  sendbuf  The pointer to the sending buffer.
[out]  recvbuf  The pointer to the receive buffer (only significant at root).
[in]  count  The number of values to send/receive.
[in]  op  The MPI_Op to perform.
[in]  root  The rank at which the reduced values are received.
[in]  comm  The MPI_Comm over which the reduction operates.
Returns
The return value of the underlying call to MPI_Reduce().

◆ sum() [1/2]

template<typename T >
void geos::MpiWrapper::sum ( Span< T const >  src,
Span< T >  dst,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience function for a MPI_Allreduce using a MPI_SUM operation.

Parameters
[in]  src  The values to send into the reduction.
[out]  dst  The resulting values, i.e. the element-wise sum across the ranks.

Definition at line 1064 of file MpiWrapper.hpp.

◆ sum() [2/2]

template<typename T >
T geos::MpiWrapper::sum ( T const &  value,
MPI_Comm  comm = MPI_COMM_GEOSX 
)
static

Convenience function for a MPI_Allreduce using a MPI_SUM operation.

Parameters
[in]  value  The value to send into the reduction.
Returns
The sum of value across all ranks.

Definition at line 1058 of file MpiWrapper.hpp.
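
A hedged usage sketch of the scalar convenience reductions:

double const localNorm = 2.0;   // illustrative per-rank quantity
double const total   = geos::MpiWrapper::sum( localNorm );
double const minimum = geos::MpiWrapper::min( localNorm );
double const maximum = geos::MpiWrapper::max( localNorm );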


The documentation for this struct was generated from the following file:
MpiWrapper.hpp