GEOS
MpiWrapper.hpp
1 /*
2  * ------------------------------------------------------------------------------------------------------------
3  * SPDX-License-Identifier: LGPL-2.1-only
4  *
5  * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
6  * Copyright (c) 2018-2024 TotalEnergies
7  * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
8  * Copyright (c) 2023-2024 Chevron
9  * Copyright (c) 2019- GEOS/GEOSX Contributors
10  * All rights reserved
11  *
12  * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
13  * ------------------------------------------------------------------------------------------------------------
14  */
15 
20 #ifndef GEOS_COMMON_MPIWRAPPER_HPP_
21 #define GEOS_COMMON_MPIWRAPPER_HPP_
22 
23 #include "common/DataTypes.hpp"
24 #include "common/Span.hpp"
25 #include "common/TypesHelpers.hpp"
26 
27 #include <numeric>
28 
29 #if defined(GEOS_USE_MPI)
30  #include <mpi.h>
31 #define MPI_PARAM( x ) x
32 #else
33 #define MPI_PARAM( x )
34 typedef int MPI_Comm;
35 
36 #define MPI_COMM_NULL ((MPI_Comm)0x04000000)
37 #define MPI_COMM_WORLD ((MPI_Comm)0x44000000)
38 #define MPI_COMM_SELF ((MPI_Comm)0x40000000)
39 
40 
41 typedef int MPI_Datatype;
42 #define MPI_CHAR ((MPI_Datatype)0x4c000101)
43 #define MPI_SIGNED_CHAR ((MPI_Datatype)0x4c000118)
44 #define MPI_UNSIGNED_CHAR ((MPI_Datatype)0x4c000102)
45 #define MPI_BYTE ((MPI_Datatype)0x4c00010d)
46 #define MPI_WCHAR ((MPI_Datatype)0x4c00040e)
47 #define MPI_SHORT ((MPI_Datatype)0x4c000203)
48 #define MPI_UNSIGNED_SHORT ((MPI_Datatype)0x4c000204)
49 #define MPI_INT ((MPI_Datatype)0x4c000405)
50 #define MPI_UNSIGNED ((MPI_Datatype)0x4c000406)
51 #define MPI_LONG ((MPI_Datatype)0x4c000807)
52 #define MPI_UNSIGNED_LONG ((MPI_Datatype)0x4c000808)
53 #define MPI_FLOAT ((MPI_Datatype)0x4c00040a)
54 #define MPI_DOUBLE ((MPI_Datatype)0x4c00080b)
55 #define MPI_LONG_DOUBLE ((MPI_Datatype)0x4c00100c)
56 #define MPI_LONG_LONG_INT ((MPI_Datatype)0x4c000809)
57 #define MPI_UNSIGNED_LONG_LONG ((MPI_Datatype)0x4c000819)
58 #define MPI_LONG_LONG MPI_LONG_LONG_INT
59 
60 typedef int MPI_Op;
61 
62 #define MPI_MAX (MPI_Op)(0x58000001)
63 #define MPI_MIN (MPI_Op)(0x58000002)
64 #define MPI_SUM (MPI_Op)(0x58000003)
65 #define MPI_PROD (MPI_Op)(0x58000004)
66 #define MPI_LAND (MPI_Op)(0x58000005)
67 #define MPI_BAND (MPI_Op)(0x58000006)
68 #define MPI_LOR (MPI_Op)(0x58000007)
69 #define MPI_BOR (MPI_Op)(0x58000008)
70 #define MPI_LXOR (MPI_Op)(0x58000009)
71 #define MPI_BXOR (MPI_Op)(0x5800000a)
72 #define MPI_MINLOC (MPI_Op)(0x5800000b)
73 #define MPI_MAXLOC (MPI_Op)(0x5800000c)
74 #define MPI_REPLACE (MPI_Op)(0x5800000d)
75 #define MPI_NO_OP (MPI_Op)(0x5800000e)
76 
77 #define MPI_SUCCESS 0 /* Successful return code */
78 #define MPI_UNDEFINED (-32766)
79 #define MPI_STATUS_IGNORE (MPI_Status *)1
80 #define MPI_STATUSES_IGNORE (MPI_Status *)1
81 #define MPI_REQUEST_NULL ((MPI_Request)0x2c000000)
82 typedef int MPI_Request;
83 
84 typedef int MPI_Info;
85 #define MPI_INFO_NULL (MPI_Info)(0x60000000)
86 
87 struct MPI_Status
88 {
89  int junk;
90 };
91 
92 #endif
93 
94 #if defined(NDEBUG)
95 #define MPI_CHECK_ERROR( error ) ((void) error)
96 #else
97 #define MPI_CHECK_ERROR( error ) GEOS_ERROR_IF_NE( error, MPI_SUCCESS );
98 #endif
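// Editorial usage sketch (not part of the original header): MPI_CHECK_ERROR wraps a raw MPI
// return code so that a non-MPI_SUCCESS result aborts in debug builds and is simply discarded
// when NDEBUG is defined.
//
//   int const err = MPI_Barrier( MPI_COMM_WORLD );
//   MPI_CHECK_ERROR( err );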
99 
100 
101 namespace geos
102 {
103 
105 #ifdef GEOS_USE_MPI
106 extern MPI_Comm MPI_COMM_GEOS;
107 #else
108 extern int MPI_COMM_GEOS;
109 #endif
110 
120 struct MpiWrapper
121 {
122 public:
123 
128  enum class Reduction
129  {
130  Max,
131  Min,
132  Sum,
133  Prod,
134  LogicalAnd,
135  LogicalOr,
136  };
137 
142  enum class PairReduction
143  {
144  Max,
145  Min,
146  };
147 
153  template< typename FIRST, typename SECOND >
154  struct PairType
155  {
156  FIRST first;
157  SECOND second;
158  };
159 
160  MpiWrapper() = delete;
161 
175 
176  static void barrier( MPI_Comm const & MPI_PARAM( comm )=MPI_COMM_GEOS );
177 
178  static int cartCoords( MPI_Comm comm, int rank, int maxdims, int coords[] );
179 
180  static int cartCreate( MPI_Comm comm_old, int ndims, const int dims[], const int periods[],
181  int reorder, MPI_Comm * comm_cart );
182 
183  static int cartRank( MPI_Comm comm, const int coords[] );
184 
185  static void commFree( MPI_Comm & comm );
186 
187  static int commRank( MPI_Comm const & MPI_PARAM( comm )=MPI_COMM_GEOS );
188 
189  static int commSize( MPI_Comm const & MPI_PARAM( comm )=MPI_COMM_GEOS );
190 
191  static bool commCompare( MPI_Comm const & comm1, MPI_Comm const & comm2 );
192 
193  static bool initialized();
194 
195  static int init( int * argc, char * * * argv );
196 
201  static void finalize();
202 
203  static MPI_Comm commDup( MPI_Comm const comm );
204 
205  static MPI_Comm commSplit( MPI_Comm const comm, int color, int key );
206 
207  static int test( MPI_Request * request, int * flag, MPI_Status * status );
208 
209  static int testAny( int count, MPI_Request array_of_requests[], int * idx, int * flags, MPI_Status array_of_statuses[] );
210 
211  static int testSome( int count, MPI_Request array_of_requests[], int * outcount, int array_of_indices[], MPI_Status array_of_statuses[] );
212 
213  static int testAll( int count, MPI_Request array_of_requests[], int * flags, MPI_Status array_of_statuses[] );
214 
221  static int check( MPI_Request * request, int * flag, MPI_Status * status );
222 
235  static int checkAny( int count, MPI_Request array_of_requests[], int * idx, int * flag, MPI_Status array_of_statuses[] );
236 
246  static int checkAll( int count, MPI_Request array_of_requests[], int * flag, MPI_Status array_of_statuses[] );
247 
248  static int wait( MPI_Request * request, MPI_Status * status );
249 
250  static int waitAny( int count, MPI_Request array_of_requests[], int * indx, MPI_Status array_of_statuses[] );
251 
252  static int waitSome( int count, MPI_Request array_of_requests[], int * outcount, int array_of_indices[], MPI_Status array_of_statuses[] );
253 
254  static int waitAll( int count, MPI_Request array_of_requests[], MPI_Status array_of_statuses[] );
255 
256  static double wtime( void );
257 
258 
268  static int activeWaitAny( const int count,
269  MPI_Request array_of_requests[],
270  MPI_Status array_of_statuses[],
271  std::function< MPI_Request ( int ) > func );
272 
282  static int activeWaitSome( const int count,
283  MPI_Request array_of_requests[],
284  MPI_Status array_of_statuses[],
285  std::function< MPI_Request ( int ) > func );
286 
299  static int activeWaitSomeCompletePhase( const int participants,
300  std::vector< std::tuple< MPI_Request *, MPI_Status *, std::function< MPI_Request ( int ) > > > const & phases );
301 
315  static int activeWaitOrderedCompletePhase( const int participants,
316  std::vector< std::tuple< MPI_Request *, MPI_Status *, std::function< MPI_Request ( int ) > > > const & phases );
318 
319 #if !defined(GEOS_USE_MPI)
320  static std::map< int, std::pair< int, void * > > & getTagToPointersMap()
321  {
322  static std::map< int, std::pair< int, void * > > tagToPointers;
323  return tagToPointers;
324  }
325 #endif
326 
331  static int nodeCommSize();
332 
344  template< typename T_SEND, typename T_RECV >
345  static int allgather( T_SEND const * sendbuf,
346  int sendcount,
347  T_RECV * recvbuf,
348  int recvcount,
349  MPI_Comm comm = MPI_COMM_GEOS );
350 
363  template< typename T_SEND, typename T_RECV >
364  static int allgatherv( T_SEND const * sendbuf,
365  int sendcount,
366  T_RECV * recvbuf,
367  int * recvcounts,
368  int * displacements,
369  MPI_Comm comm = MPI_COMM_GEOS );
370 
377  template< typename T >
378  static void allGather( T const myValue, array1d< T > & allValues, MPI_Comm comm = MPI_COMM_GEOS );
379 
380  template< typename T >
381  static int allGather( arrayView1d< T const > const & sendbuf,
382  array1d< T > & recvbuf,
383  MPI_Comm comm = MPI_COMM_GEOS );
384 
393  template< typename T >
394  static T allReduce( T const & value, Reduction const op, MPI_Comm comm = MPI_COMM_GEOS );
395 
404  template< typename T >
405  static void allReduce( Span< T const > src, Span< T > dst, Reduction const op, MPI_Comm comm = MPI_COMM_GEOS );
406 
415  template< typename SRC_CONTAINER_TYPE, typename DST_CONTAINER_TYPE >
416  static void allReduce( SRC_CONTAINER_TYPE const & src, DST_CONTAINER_TYPE & dst, Reduction const op, MPI_Comm const comm = MPI_COMM_GEOS );
417 
427  template< typename SRC_CONTAINER_TYPE, typename DST_CONTAINER_TYPE >
428  static void allReduce( SRC_CONTAINER_TYPE const & src, DST_CONTAINER_TYPE & dst, int const count, Reduction const op, MPI_Comm const comm );
429 
439  template< typename FIRST, typename SECOND, PairReduction OP >
440  static PairType< FIRST, SECOND > allReduce( PairType< FIRST, SECOND > const & localPair,
441  MPI_Comm comm = MPI_COMM_GEOS );
442 
453  template< typename FIRST, typename SECOND, typename CONTAINER, PairReduction OP >
454  static PairType< FIRST, SECOND > allReduce( CONTAINER const & pairs,
455  MPI_Comm comm = MPI_COMM_GEOS );
456 
466  template< typename T >
467  static int reduce( T const * sendbuf, T * recvbuf, int count, MPI_Op op, int root, MPI_Comm comm = MPI_COMM_GEOS );
468 
477  template< typename T >
478  static T reduce( T const & value, Reduction const op, int root, MPI_Comm comm = MPI_COMM_GEOS );
479 
488  template< typename T >
489  static void reduce( Span< T const > src, Span< T > dst, Reduction const op, int root, MPI_Comm comm = MPI_COMM_GEOS );
490 
491 
492  template< typename T >
493  static int scan( T const * sendbuf, T * recvbuf, int count, MPI_Op op, MPI_Comm comm = MPI_COMM_GEOS );
494 
495  template< typename T >
496  static int exscan( T const * sendbuf, T * recvbuf, int count, MPI_Op op, MPI_Comm comm = MPI_COMM_GEOS );
497 
506  template< typename T >
507  static int bcast( T * buffer, int count, int root, MPI_Comm comm = MPI_COMM_GEOS );
508 
509 
516  template< typename T >
517  static void broadcast( T & value, int srcRank = 0, MPI_Comm comm = MPI_COMM_GEOS );
518 
531  template< typename TS, typename TR >
532  static int gather( TS const * const sendbuf,
533  int sendcount,
534  TR * const recvbuf,
535  int recvcount,
536  int root,
537  MPI_Comm comm = MPI_COMM_GEOS );
538 
550  template< typename T, typename DST_CONTAINER,
551  typename = std::enable_if_t<
552  std::is_trivially_copyable_v< T > &&
553  std::is_same_v< decltype(std::declval< DST_CONTAINER >().size()), std::size_t > &&
554  std::is_same_v< decltype(std::declval< DST_CONTAINER >().data()), T * > > >
555  static int gather( T const & value,
556  DST_CONTAINER & destValuesBuffer,
557  int root,
558  MPI_Comm comm = MPI_COMM_GEOS );
559 
574  template< typename TS, typename TR >
575  static int gatherv( TS const * const sendbuf,
576  int sendcount,
577  TR * const recvbuf,
578  const int * recvcounts,
579  const int * displs,
580  int root,
581  MPI_Comm comm = MPI_COMM_GEOS );
582 
588  static MPI_Op getMpiOp( Reduction const op );
589 
590  template< typename T >
591  static int recv( array1d< T > & buf,
592  int MPI_PARAM( source ),
593  int tag,
594  MPI_Comm MPI_PARAM( comm ),
595  MPI_Status * MPI_PARAM( request ) );
596 
597  template< typename T >
598  static int iSend( arrayView1d< T > const & buf,
599  int MPI_PARAM( dest ),
600  int tag,
601  MPI_Comm MPI_PARAM( comm ),
602  MPI_Request * MPI_PARAM( request ) );
603 
614  template< typename T >
615  static int iRecv( T * const buf,
616  int count,
617  int source,
618  int tag,
619  MPI_Comm comm,
620  MPI_Request * request );
621 
632  template< typename T >
633  static int iSend( T const * const buf,
634  int count,
635  int dest,
636  int tag,
637  MPI_Comm comm,
638  MPI_Request * request );
639 
647  template< typename U, typename T >
648  static U prefixSum( T const value, MPI_Comm comm = MPI_COMM_GEOS );
649 
655  template< typename T >
656  static T sum( T const & value, MPI_Comm comm = MPI_COMM_GEOS );
657 
664  template< typename T >
665  static void sum( Span< T const > src, Span< T > dst, MPI_Comm comm = MPI_COMM_GEOS );
666 
672  template< typename T >
673  static T min( T const & value, MPI_Comm comm = MPI_COMM_GEOS );
674 
681  template< typename T >
682  static void min( Span< T const > src, Span< T > dst, MPI_Comm comm = MPI_COMM_GEOS );
683 
690  template< typename FIRST, typename SECOND >
691  static PairType< FIRST, SECOND > min( PairType< FIRST, SECOND > const & pair, MPI_Comm comm = MPI_COMM_GEOS );
692 
699  template< typename FIRST, typename SECOND, typename CONTAINER >
700  static PairType< FIRST, SECOND > min( CONTAINER const & pairs, MPI_Comm comm = MPI_COMM_GEOS );
701 
707  template< typename T >
708  static T max( T const & value, MPI_Comm comm = MPI_COMM_GEOS );
709 
716  template< typename T >
717  static void max( Span< T const > src, Span< T > dst, MPI_Comm comm = MPI_COMM_GEOS );
718 
725  template< typename FIRST, typename SECOND >
726  static PairType< FIRST, SECOND > max( PairType< FIRST, SECOND > const & pair, MPI_Comm comm = MPI_COMM_GEOS );
727 
734  template< typename FIRST, typename SECOND, typename CONTAINER >
735  static PairType< FIRST, SECOND > max( CONTAINER const & pairs, MPI_Comm comm = MPI_COMM_GEOS );
736 
737 private:
738 
748  template< typename T >
749  static int allReduce( T const * sendbuf, T * recvbuf, int count, MPI_Op op, MPI_Comm comm = MPI_COMM_GEOS );
750 };
751 
752 namespace internal
753 {
754 
756 struct ManagedResources
757 {
758  // The list of managed MPI_Op instances
759  std::set< MPI_Op > m_mpiOps;
760 
761  // The list of managed MPI_Type instances
762  std::set< MPI_Datatype > m_mpiTypes;
763 
768  void finalize();
769 };
770 
775 ManagedResources & getManagedResources();
776 
777 template< typename T, typename ENABLE = void >
778 struct MpiTypeImpl {};
779 
780 #define ADD_MPI_TYPE_MAP( T, MPI_T ) \
781  template<> struct MpiTypeImpl< T > { static MPI_Datatype get() { return MPI_T; } }
782 
783 ADD_MPI_TYPE_MAP( float, MPI_FLOAT );
784 ADD_MPI_TYPE_MAP( double, MPI_DOUBLE );
785 
786 ADD_MPI_TYPE_MAP( char, MPI_CHAR );
787 ADD_MPI_TYPE_MAP( signed char, MPI_SIGNED_CHAR );
788 ADD_MPI_TYPE_MAP( unsigned char, MPI_UNSIGNED_CHAR );
789 
790 ADD_MPI_TYPE_MAP( int, MPI_INT );
791 ADD_MPI_TYPE_MAP( long int, MPI_LONG );
792 ADD_MPI_TYPE_MAP( long long int, MPI_LONG_LONG );
793 
794 ADD_MPI_TYPE_MAP( unsigned int, MPI_UNSIGNED );
795 ADD_MPI_TYPE_MAP( unsigned long int, MPI_UNSIGNED_LONG );
796 ADD_MPI_TYPE_MAP( unsigned long long int, MPI_UNSIGNED_LONG_LONG );
797 
798 #undef ADD_MPI_TYPE_MAP
799 
800 template< typename T >
801 struct MpiTypeImpl< T, std::enable_if_t< std::is_enum< T >::value > >
802 {
803  static MPI_Datatype get() { return MpiTypeImpl< std::underlying_type_t< T > >::get(); }
804 };
805 
806 template< typename T >
807 MPI_Datatype getMpiType()
808 {
809  return MpiTypeImpl< T >::get();
810 }
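// Editorial note: internal::getMpiType< T >() maps a C++ type to its MPI datatype at compile
// time, while enum types map to the MPI type of their underlying integer type via the
// specialization below. For example (the exact datatype depends on how globalIndex is configured):
//
//   MPI_Datatype const t = internal::getMpiType< globalIndex >();   // e.g. MPI_LONG_LONG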
811 
812 template< typename FIRST, typename SECOND >
813 MPI_Datatype getMpiPairType()
814 {
815  static_assert( "no default implementation, please add a template specialization and add it in the \"testMpiWrapper\" unit test." );
816  return {};
817 }
818 template<> MPI_Datatype getMpiPairType< int, int >();
819 template<> MPI_Datatype getMpiPairType< long int, int >();
820 template<> MPI_Datatype getMpiPairType< long int, long int >();
821 template<> MPI_Datatype getMpiPairType< long long int, long long int >();
822 template<> MPI_Datatype getMpiPairType< float, int >();
823 template<> MPI_Datatype getMpiPairType< double, int >();
824 template<> MPI_Datatype getMpiPairType< double, long int >();
825 template<> MPI_Datatype getMpiPairType< double, long long int >();
826 template<> MPI_Datatype getMpiPairType< double, double >();
827 
828 // It is advised to always use this custom operator for pairs as MPI_MAXLOC is not a true lexicographical comparator.
829 template< typename FIRST, typename SECOND, MpiWrapper::PairReduction OP >
830 MPI_Op getMpiPairReductionOp()
831 {
832  static auto const createOpHolder = [] () {
833  using PAIR_T = MpiWrapper::PairType< FIRST, SECOND >;
834 
835  auto const customOpFunc =
836  []( void * invec, void * inoutvec, int * len, MPI_Datatype * )
837  {
838  for( int i = 0; i < *len; ++i )
839  {
840  PAIR_T & in = static_cast< PAIR_T * >(invec)[i];
841  PAIR_T & inout = static_cast< PAIR_T * >(inoutvec)[i];
842  if constexpr ( OP == MpiWrapper::PairReduction::Min )
843  {
844  if( std::tie( in.first, in.second ) < std::tie( inout.first, inout.second ) )
845  inout = in;
846  }
847  else
848  {
849  if( std::tie( in.first, in.second ) > std::tie( inout.first, inout.second ) )
850  inout = in;
851  }
852  }
853  };
854 
855  MPI_Op mpiOp;
856  GEOS_ERROR_IF_NE( MPI_Op_create( customOpFunc, 1, &mpiOp ), MPI_SUCCESS );
857  // Resource registered to be destroyed at MpiWrapper::finalize().
858  internal::getManagedResources().m_mpiOps.emplace( mpiOp );
859  return mpiOp;
860  };
861  // Static storage to ensure the MPI operation is created only once and reused for all calls to this function.
862  static MPI_Op mpiOp{ createOpHolder() };
863  return mpiOp;
864 }
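// Editorial usage sketch: unlike MPI_MINLOC / MPI_MAXLOC, the operator above compares the whole
// (first, second) pair lexicographically, so ties on `first` are broken deterministically by
// `second`. The per-rank values below are hypothetical.
//
//   using Pair = MpiWrapper::PairType< real64, globalIndex >;
//   Pair const localMin{ smallestLocalValue, smallestLocalValueGlobalIndex };
//   Pair const globalMin =
//     MpiWrapper::allReduce< real64, globalIndex, MpiWrapper::PairReduction::Min >( localMin );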
865 
866 }
867 
868 inline MPI_Op MpiWrapper::getMpiOp( Reduction const op )
869 {
870  switch( op )
871  {
872  case Reduction::Sum:
873  {
874  return MPI_SUM;
875  }
876  case Reduction::Min:
877  {
878  return MPI_MIN;
879  }
880  case Reduction::Max:
881  {
882  return MPI_MAX;
883  }
884  case Reduction::Prod:
885  {
886  return MPI_PROD;
887  }
888  case Reduction::LogicalAnd:
889  {
890  return MPI_LAND;
891  }
892  case Reduction::LogicalOr:
893  {
894  return MPI_LOR;
895  }
896  default:
897  GEOS_ERROR( "Unsupported reduction operation" );
898  return MPI_NO_OP;
899  }
900 }
901 
902 template< typename T_SEND, typename T_RECV >
903 int MpiWrapper::allgather( T_SEND const * const sendbuf,
904  int sendcount,
905  T_RECV * const recvbuf,
906  int recvcount,
907  MPI_Comm MPI_PARAM( comm ) )
908 {
909 #ifdef GEOS_USE_MPI
910  return MPI_Allgather( sendbuf, sendcount, internal::getMpiType< T_SEND >(),
911  recvbuf, recvcount, internal::getMpiType< T_RECV >(),
912  comm );
913 #else
914  static_assert( std::is_same< T_SEND, T_RECV >::value,
915  "MpiWrapper::allgather() for serial run requires that send and receive buffers be of the same type" );
916  GEOS_ERROR_IF_NE_MSG( sendcount, recvcount, "sendcount is not equal to recvcount." );
917  std::copy( sendbuf, sendbuf + sendcount, recvbuf );
918  return 0;
919 #endif
920 }
921 
922 template< typename T_SEND, typename T_RECV >
923 int MpiWrapper::allgatherv( T_SEND const * const sendbuf,
924  int sendcount,
925  T_RECV * const recvbuf,
926  int * recvcounts,
927  int * displacements,
928  MPI_Comm MPI_PARAM( comm ) )
929 {
930 #ifdef GEOS_USE_MPI
931  return MPI_Allgatherv( sendbuf, sendcount, internal::getMpiType< T_SEND >(),
932  recvbuf, recvcounts, displacements, internal::getMpiType< T_RECV >(),
933  comm );
934 #else
935  static_assert( std::is_same< T_SEND, T_RECV >::value,
936  "MpiWrapper::allgatherv() for serial run requires that send and receive buffers be of the same type" );
937  GEOS_ERROR_IF_NE_MSG( sendcount, recvcounts[0], "sendcount is not equal to recvcounts[0]." );
938  std::copy( sendbuf, sendbuf + sendcount, recvbuf );
939  return 0;
940 #endif
941 }
942 
943 
944 template< typename T >
945 void MpiWrapper::allGather( T const myValue, array1d< T > & allValues, MPI_Comm MPI_PARAM( comm ) )
946 {
947 #ifdef GEOS_USE_MPI
948  int const mpiSize = commSize( comm );
949  allValues.resize( mpiSize );
950 
951  MPI_Datatype const MPI_TYPE = internal::getMpiType< T >();
952 
953  MPI_Allgather( &myValue, 1, MPI_TYPE, allValues.data(), 1, MPI_TYPE, comm );
954 
955 #else
956  allValues.resize( 1 );
957  allValues[0] = myValue;
958 #endif
959 }
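// Editorial usage sketch: gather one value from every rank into a rank-indexed array
// (numLocalElems is a hypothetical per-rank count).
//
//   array1d< localIndex > elemCounts;
//   MpiWrapper::allGather( numLocalElems, elemCounts );   // elemCounts[r] holds rank r's count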
960 
961 template< typename T >
962 int MpiWrapper::allGather( arrayView1d< T const > const & sendValues,
963  array1d< T > & allValues,
964  MPI_Comm MPI_PARAM( comm ) )
965 {
966  int const sendSize = LvArray::integerConversion< int >( sendValues.size() );
967 #ifdef GEOS_USE_MPI
968  int const mpiSize = commSize( comm );
969  allValues.resize( mpiSize * sendSize );
970  return MPI_Allgather( sendValues.data(),
971  sendSize,
972  internal::getMpiType< T >(),
973  allValues.data(),
974  sendSize,
975  internal::getMpiType< T >(),
976  comm );
977 
978 #else
979  allValues.resize( sendSize );
980  for( localIndex a=0; a<sendSize; ++a )
981  {
982  allValues[a] = sendValues[a];
983  }
984  return 0;
985 #endif
986 }
987 
988 template< typename T >
989 int MpiWrapper::allReduce( T const * const sendbuf,
990  T * const recvbuf,
991  int const count,
992  MPI_Op const MPI_PARAM( op ),
993  MPI_Comm const MPI_PARAM( comm ) )
994 {
995 #ifdef GEOS_USE_MPI
996  MPI_Datatype const mpiType = internal::getMpiType< T >();
997  return MPI_Allreduce( sendbuf == recvbuf ? MPI_IN_PLACE : sendbuf, recvbuf, count, mpiType, op, comm );
998 #else
999  if( sendbuf != recvbuf )
1000  {
1001  memcpy( recvbuf, sendbuf, count * sizeof( T ) );
1002  }
1003  return 0;
1004 #endif
1005 }
1006 
1007 template< typename T >
1008 int MpiWrapper::reduce( T const * const sendbuf,
1009  T * const recvbuf,
1010  int const count,
1011  MPI_Op const MPI_PARAM( op ),
1012  int root,
1013  MPI_Comm const MPI_PARAM( comm ) )
1014 {
1015 #ifdef GEOS_USE_MPI
1016  MPI_Datatype const mpiType = internal::getMpiType< T >();
1017  return MPI_Reduce( sendbuf == recvbuf ? MPI_IN_PLACE : sendbuf, recvbuf, count, mpiType, op, root, comm );
1018 #else
1019  if( sendbuf != recvbuf )
1020  {
1021  memcpy( recvbuf, sendbuf, count * sizeof( T ) );
1022  }
1023  return 0;
1024 #endif
1025 }
1026 
1027 template< typename T >
1028 int MpiWrapper::scan( T const * const sendbuf,
1029  T * const recvbuf,
1030  int count,
1031  MPI_Op MPI_PARAM( op ),
1032  MPI_Comm MPI_PARAM( comm ) )
1033 {
1034 #ifdef GEOS_USE_MPI
1035  return MPI_Scan( sendbuf, recvbuf, count, internal::getMpiType< T >(), op, comm );
1036 #else
1037  memcpy( recvbuf, sendbuf, count*sizeof(T) );
1038  return 0;
1039 #endif
1040 }
1041 
1042 template< typename T >
1043 int MpiWrapper::exscan( T const * const MPI_PARAM( sendbuf ),
1044  T * const recvbuf,
1045  int count,
1046  MPI_Op MPI_PARAM( op ),
1047  MPI_Comm MPI_PARAM( comm ) )
1048 {
1049 #ifdef GEOS_USE_MPI
1050  return MPI_Exscan( sendbuf, recvbuf, count, internal::getMpiType< T >(), op, comm );
1051 #else
1052  memset( recvbuf, 0, count*sizeof(T) );
1053  return 0;
1054 #endif
1055 }
1056 
1057 template< typename T >
1058 int MpiWrapper::bcast( T * const MPI_PARAM( buffer ),
1059  int MPI_PARAM( count ),
1060  int MPI_PARAM( root ),
1061  MPI_Comm MPI_PARAM( comm ) )
1062 {
1063 #ifdef GEOS_USE_MPI
1064  return MPI_Bcast( buffer, count, internal::getMpiType< T >(), root, comm );
1065 #else
1066  return 0;
1067 #endif
1068 
1069 }
1070 
1071 template< typename T >
1072 void MpiWrapper::broadcast( T & MPI_PARAM( value ), int MPI_PARAM( srcRank ), MPI_Comm MPI_PARAM( comm ) )
1073 {
1074 #ifdef GEOS_USE_MPI
1075  MPI_Bcast( &value, 1, internal::getMpiType< T >(), srcRank, comm );
1076 #endif
1077 }
1078 
1079 template<>
1080 inline
1081 void MpiWrapper::broadcast< string >( string & MPI_PARAM( value ),
1082  int MPI_PARAM( srcRank ),
1083  MPI_Comm MPI_PARAM( comm ) )
1084 {
1085 #ifdef GEOS_USE_MPI
1086  int size = LvArray::integerConversion< int >( value.size() );
1087  broadcast( size, srcRank, comm );
1088  value.resize( size );
1089  MPI_Bcast( const_cast< char * >( value.data() ), size, internal::getMpiType< char >(), srcRank, comm );
1090 #endif
1091 }
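// Editorial usage sketch: broadcast a value owned by one rank to all ranks; the string
// specialization above first broadcasts the length, then the character data. The file name is
// hypothetical.
//
//   string inputFileName;
//   if( MpiWrapper::commRank() == 0 )
//   {
//     inputFileName = "run.xml";
//   }
//   MpiWrapper::broadcast( inputFileName );   // identical on every rank afterwards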
1092 
1093 template< typename TS, typename TR >
1094 int MpiWrapper::gather( TS const * const sendbuf,
1095  int sendcount,
1096  TR * const recvbuf,
1097  int recvcount,
1098  int MPI_PARAM( root ),
1099  MPI_Comm MPI_PARAM( comm ) )
1100 {
1101 #ifdef GEOS_USE_MPI
1102  return MPI_Gather( sendbuf, sendcount, internal::getMpiType< TS >(),
1103  recvbuf, recvcount, internal::getMpiType< TR >(),
1104  root, comm );
1105 #else
1106  static_assert( std::is_same< TS, TR >::value,
1107  "MpiWrapper::gather() for serial run requires that send and receive buffers be of the same type" );
1108  std::size_t const sendBufferSize = sendcount * sizeof(TS);
1109  std::size_t const recvBufferSize = recvcount * sizeof(TR);
1110  GEOS_ERROR_IF_NE_MSG( sendBufferSize, recvBufferSize, "size of send buffer and receive buffer are not equal" );
1111  memcpy( recvbuf, sendbuf, sendBufferSize );
1112  return 0;
1113 #endif
1114 }
1115 
1116 template< typename T, typename DST_CONTAINER, typename >
1117 int MpiWrapper::gather( T const & value,
1118  DST_CONTAINER & destValuesBuffer,
1119  int root,
1120  MPI_Comm MPI_PARAM( comm ) )
1121 {
1122  if( commRank() == 0 )
1123  GEOS_ERROR_IF_LT_MSG( destValuesBuffer.size(), size_t( commSize() ),
1124  "Receive buffer is not large enough to contain the values to receive." );
1125 #ifdef GEOS_USE_MPI
1126  return MPI_Gather( &value, sizeof( T ), internal::getMpiType< uint8_t >(),
1127  destValuesBuffer.data(), sizeof( T ), internal::getMpiType< uint8_t >(),
1128  root, comm );
1129 #else
1130  memcpy( destValuesBuffer.data(), &value, sizeof( T ) );
1131  return 0;
1132 #endif
1133 }
1134 
1135 template< typename TS, typename TR >
1136 int MpiWrapper::gatherv( TS const * const sendbuf,
1137  int sendcount,
1138  TR * const recvbuf,
1139  const int * recvcounts,
1140  const int * MPI_PARAM( displs ),
1141  int MPI_PARAM( root ),
1142  MPI_Comm MPI_PARAM( comm ) )
1143 {
1144 #ifdef GEOS_USE_MPI
1145  return MPI_Gatherv( sendbuf, sendcount, internal::getMpiType< TS >(),
1146  recvbuf, recvcounts, displs, internal::getMpiType< TR >(),
1147  root, comm );
1148 #else
1149  static_assert( std::is_same< TS, TR >::value,
1150  "MpiWrapper::gatherv() for serial run requires that send and receive buffers be of the same type" );
1151  std::size_t const sendBufferSize = sendcount * sizeof(TS);
1152  std::size_t const recvBufferSize = recvcounts[0] * sizeof(TR);
1153  GEOS_ERROR_IF_NE_MSG( sendBufferSize, recvBufferSize, "size of send buffer and receive buffer are not equal" );
1154  memcpy( recvbuf, sendbuf, sendBufferSize );
1155  return 0;
1156 #endif
1157 }
1158 
1159 template< typename T >
1160 int MpiWrapper::iRecv( T * const buf,
1161  int count,
1162  int MPI_PARAM( source ),
1163  int tag,
1164  MPI_Comm MPI_PARAM( comm ),
1165  MPI_Request * MPI_PARAM( request ) )
1166 {
1167 #ifdef GEOS_USE_MPI
1168  GEOS_ERROR_IF( (*request)!=MPI_REQUEST_NULL,
1169  "Attempting to use an MPI_Request that is still in use." );
1170  return MPI_Irecv( buf, count, internal::getMpiType< T >(), source, tag, comm, request );
1171 #else
1172  std::map< int, std::pair< int, void * > > & pointerMap = getTagToPointersMap();
1173  std::map< int, std::pair< int, void * > >::iterator iPointer = pointerMap.find( tag );
1174 
1175  if( iPointer==pointerMap.end() )
1176  {
1177  pointerMap.insert( {tag, {1, buf} } );
1178  }
1179  else
1180  {
1181  GEOS_ERROR_IF( iPointer->second.first != 0,
1182  "Tag is assigned, but pointer was not set by iSend." );
1183  memcpy( buf, iPointer->second.second, count*sizeof(T) );
1184  pointerMap.erase( iPointer );
1185  }
1186  return 0;
1187 #endif
1188 }
1189 
1190 template< typename T >
1191 int MpiWrapper::recv( array1d< T > & buf,
1192  int MPI_PARAM( source ),
1193  int tag,
1194  MPI_Comm MPI_PARAM( comm ),
1195  MPI_Status * MPI_PARAM( request ) )
1196 {
1197 #ifdef GEOS_USE_MPI
1198  MPI_Status status;
1199  int count;
1200  MPI_Probe( source, tag, comm, &status );
1201  MPI_Get_count( &status, MPI_CHAR, &count );
1202 
1203  GEOS_ASSERT_EQ( count % sizeof( T ), 0 );
1204  buf.resize( count / sizeof( T ) );
1205 
1206  return MPI_Recv( reinterpret_cast< char * >( buf.data() ),
1207  count,
1208  MPI_CHAR,
1209  source,
1210  tag,
1211  comm,
1212  request );
1213 #else
1214  GEOS_ERROR( "Not implemented!" );
1215  return MPI_SUCCESS;
1216 #endif
1217 }
1218 
1219 template< typename T >
1220 int MpiWrapper::iSend( arrayView1d< T > const & buf,
1221  int MPI_PARAM( dest ),
1222  int tag,
1223  MPI_Comm MPI_PARAM( comm ),
1224  MPI_Request * MPI_PARAM( request ) )
1225 {
1226 #ifdef GEOS_USE_MPI
1227  GEOS_ERROR_IF( (*request)!=MPI_REQUEST_NULL,
1228  "Attempting to use an MPI_Request that is still in use." );
1229  return MPI_Isend( reinterpret_cast< void const * >( buf.data() ),
1230  buf.size() * sizeof( T ),
1231  MPI_CHAR,
1232  dest,
1233  tag,
1234  comm,
1235  request );
1236 #else
1237  GEOS_ERROR( "Not implemented." );
1238  return MPI_SUCCESS;
1239 #endif
1240 }
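// Editorial usage sketch: this overload ships the raw bytes of an arrayView1d, while
// recv( array1d< T > &, ... ) probes the incoming message size and resizes the receive buffer
// before MPI_Recv. The neighbor rank, tag, and buffers below are hypothetical.
//
//   MPI_Request sendRequest = MPI_REQUEST_NULL;
//   MpiWrapper::iSend( valuesToSend.toView(), neighborRank, tag, MPI_COMM_GEOS, &sendRequest );
//   MpiWrapper::recv( receivedValues, neighborRank, tag, MPI_COMM_GEOS, MPI_STATUS_IGNORE );
//   MpiWrapper::wait( &sendRequest, MPI_STATUS_IGNORE );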
1241 
1242 template< typename T >
1243 int MpiWrapper::iSend( T const * const buf,
1244  int count,
1245  int MPI_PARAM( dest ),
1246  int tag,
1247  MPI_Comm MPI_PARAM( comm ),
1248  MPI_Request * MPI_PARAM( request ) )
1249 {
1250 #ifdef GEOS_USE_MPI
1251  GEOS_ERROR_IF( (*request)!=MPI_REQUEST_NULL,
1252  "Attempting to use an MPI_Request that is still in use." );
1253  return MPI_Isend( buf, count, internal::getMpiType< T >(), dest, tag, comm, request );
1254 #else
1255  std::map< int, std::pair< int, void * > > & pointerMap = getTagToPointersMap();
1256  std::map< int, std::pair< int, void * > >::iterator iPointer = pointerMap.find( tag );
1257 
1258  if( iPointer==pointerMap.end() )
1259  {
1260  pointerMap.insert( {tag, {0, const_cast< T * >(buf)}
1261  } );
1262  }
1263  else
1264  {
1265  GEOS_ERROR_IF( iPointer->second.first != 1,
1266  "Tag is assigned, but pointer was not set by iRecv." );
1267  memcpy( iPointer->second.second, buf, count*sizeof(T) );
1268  pointerMap.erase( iPointer );
1269  }
1270  return 0;
1271 #endif
1272 }
1273 
1274 template< typename U, typename T >
1275 U MpiWrapper::prefixSum( T const value, MPI_Comm comm )
1276 {
1277  U localResult;
1278 
1279 #ifdef GEOS_USE_MPI
1280  U const convertedValue = value;
1281  int const error = MPI_Exscan( &convertedValue, &localResult, 1, internal::getMpiType< U >(), MPI_SUM, comm );
1282  MPI_CHECK_ERROR( error );
1283 #endif
1284  if( commRank() == 0 )
1285  {
1286  localResult = 0;
1287  }
1288 
1289  return localResult;
1290 }
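// Editorial usage sketch: prefixSum returns the exclusive prefix sum of `value` over the ranks
// of the communicator, e.g. the first global index owned by this rank (numLocalElems is a
// hypothetical per-rank count).
//
//   globalIndex const rankOffset = MpiWrapper::prefixSum< globalIndex >( numLocalElems );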
1291 
1292 
1293 template< typename T >
1294 T MpiWrapper::allReduce( T const & value, Reduction const op, MPI_Comm const comm )
1295 {
1296  T result;
1297  allReduce( &value, &result, 1, getMpiOp( op ), comm );
1298  return result;
1299 }
1300 
1301 template< typename T >
1302 void MpiWrapper::allReduce( Span< T const > const src, Span< T > const dst, Reduction const op, MPI_Comm const comm )
1303 {
1304  GEOS_ASSERT_EQ( src.size(), dst.size() );
1305  allReduce( src.data(), dst.data(), LvArray::integerConversion< int >( src.size() ), getMpiOp( op ), comm );
1306 }
1307 
1308 template< typename SRC_CONTAINER_TYPE, typename DST_CONTAINER_TYPE >
1309 void MpiWrapper::allReduce( SRC_CONTAINER_TYPE const & src, DST_CONTAINER_TYPE & dst, int const count, Reduction const op, MPI_Comm const comm )
1310 {
1311  static_assert( std::is_trivially_copyable< typename get_value_type< SRC_CONTAINER_TYPE >::type >::value,
1312  "The type in the source container must be trivially copyable." );
1313  static_assert( std::is_trivially_copyable< typename get_value_type< DST_CONTAINER_TYPE >::type >::value,
1314  "The type in the destination container must be trivially copyable." );
1315  static_assert( std::is_same< typename get_value_type< SRC_CONTAINER_TYPE >::type,
1316  typename get_value_type< DST_CONTAINER_TYPE >::type >::value,
1317  "Source and destination containers must have the same value type." );
1318  GEOS_ASSERT_GE( src.size(), count );
1319  GEOS_ASSERT_GE( dst.size(), count );
1320  allReduce( src.data(), dst.data(), count, getMpiOp( op ), comm );
1321 }
1322 
1323 template< typename SRC_CONTAINER_TYPE, typename DST_CONTAINER_TYPE >
1324 void MpiWrapper::allReduce( SRC_CONTAINER_TYPE const & src, DST_CONTAINER_TYPE & dst, Reduction const op, MPI_Comm const comm )
1325 {
1326  static_assert( std::is_trivially_copyable< typename get_value_type< SRC_CONTAINER_TYPE >::type >::value,
1327  "The type in the source container must be trivially copyable." );
1328  static_assert( std::is_trivially_copyable< typename get_value_type< DST_CONTAINER_TYPE >::type >::value,
1329  "The type in the destination container must be trivially copyable." );
1330  static_assert( std::is_same< typename get_value_type< SRC_CONTAINER_TYPE >::type,
1331  typename get_value_type< DST_CONTAINER_TYPE >::type >::value,
1332  "Source and destination containers must have the same value type." );
1333  GEOS_ASSERT_EQ( src.size(), dst.size() );
1334  allReduce( src.data(), dst.data(), LvArray::integerConversion< int >( src.size() ), getMpiOp( op ), comm );
1335 }
1336 
1337 template< typename T >
1338 T MpiWrapper::sum( T const & value, MPI_Comm comm )
1339 {
1340  return MpiWrapper::allReduce( value, Reduction::Sum, comm );
1341 }
1342 
1343 template< typename T >
1344 void MpiWrapper::sum( Span< T const > src, Span< T > dst, MPI_Comm comm )
1345 {
1346  MpiWrapper::allReduce( src, dst, Reduction::Sum, comm );
1347 }
1348 
1349 template< typename T >
1350 T MpiWrapper::min( T const & value, MPI_Comm comm )
1351 {
1352  return MpiWrapper::allReduce( value, Reduction::Min, comm );
1353 }
1354 
1355 template< typename T >
1356 void MpiWrapper::min( Span< T const > src, Span< T > dst, MPI_Comm comm )
1357 {
1358  MpiWrapper::allReduce( src, dst, Reduction::Min, comm );
1359 }
1360 
1361 template< typename T >
1362 T MpiWrapper::max( T const & value, MPI_Comm comm )
1363 {
1364  return MpiWrapper::allReduce( value, Reduction::Max, comm );
1365 }
1366 
1367 template< typename T >
1368 void MpiWrapper::max( Span< T const > src, Span< T > dst, MPI_Comm comm )
1369 {
1370  MpiWrapper::allReduce( src, dst, Reduction::Max, comm );
1371 }
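// Editorial usage sketch: the scalar convenience wrappers above all perform an MPI_Allreduce on
// MPI_COMM_GEOS by default (localResidual is a hypothetical per-rank quantity).
//
//   real64 const globalSum = MpiWrapper::sum( localResidual );
//   real64 const globalMin = MpiWrapper::min( localResidual );
//   real64 const globalMax = MpiWrapper::max( localResidual );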
1372 
1373 
1374 template< typename T >
1375 T MpiWrapper::reduce( T const & value, Reduction const op, int root, MPI_Comm const comm )
1376 {
1377  T result;
1378  reduce( &value, &result, 1, getMpiOp( op ), root, comm );
1379  return result;
1380 }
1381 
1382 template< typename T >
1383 void MpiWrapper::reduce( Span< T const > const src, Span< T > const dst, Reduction const op, int root, MPI_Comm const comm )
1384 {
1385  GEOS_ASSERT_EQ( src.size(), dst.size() );
1386  reduce( src.data(), dst.data(), LvArray::integerConversion< int >( src.size() ), getMpiOp( op ), root, comm );
1387 }
1388 
1389 template< typename FIRST, typename SECOND, MpiWrapper::PairReduction const OP >
1390 MpiWrapper::PairType< FIRST, SECOND >
1391 MpiWrapper::allReduce( PairType< FIRST, SECOND > const & localPair, MPI_Comm comm )
1392 {
1393 #ifdef GEOS_USE_MPI
1394  auto const type = internal::getMpiPairType< FIRST, SECOND >();
1395  auto const mpiOp = internal::getMpiPairReductionOp< FIRST, SECOND, OP >();
1396  PairType< FIRST, SECOND > pair{ localPair.first, localPair.second };
1397  MPI_Allreduce( MPI_IN_PLACE, &pair, 1, type, mpiOp, comm );
1398  return pair;
1399 #else
1400  return localPair;
1401 #endif
1402 }
1403 
1404 template< typename FIRST, typename SECOND, typename CONTAINER, MpiWrapper::PairReduction const OP >
1405 MpiWrapper::PairType< FIRST, SECOND >
1406 MpiWrapper::allReduce( CONTAINER const & pairs, MPI_Comm const comm )
1407 {
1408  using PAIR_T = PairType< FIRST, SECOND >;
1409  std::function< PAIR_T( PAIR_T, PAIR_T ) > const getMin = []( PAIR_T const & a, PAIR_T const & b ) {
1410  return ( std::tie( a.first, a.second ) < std::tie( b.first, b.second ) ) ? a : b;
1411  };
1412  std::function< PAIR_T( PAIR_T, PAIR_T ) > const getMax = []( PAIR_T const & a, PAIR_T const & b ) {
1413  return ( std::tie( a.first, a.second ) > std::tie( b.first, b.second ) ) ? a : b;
1414  };
1415  PAIR_T const defaultPair{
1416  OP == PairReduction::Min ? std::numeric_limits< FIRST >::max() : std::numeric_limits< FIRST >::lowest(),
1417  OP == PairReduction::Min ? std::numeric_limits< SECOND >::max() : std::numeric_limits< SECOND >::lowest()
1418  };
1419  // based on the operation, pair will be the minimum / maximum element (or defaultPair if pairs is empty)
1420  PAIR_T pair = std::accumulate( pairs.begin(), pairs.end(), defaultPair,
1421  OP == PairReduction::Min ? getMin : getMax );
1422  return allReduce< FIRST, SECOND, OP >( pair, comm );
1423 }
1424 
1425 template< typename FIRST, typename SECOND >
1426 MpiWrapper::PairType< FIRST, SECOND > MpiWrapper::min( PairType< FIRST, SECOND > const & pair, MPI_Comm comm )
1427 { return allReduce< FIRST, SECOND, PairReduction::Min >( pair, comm ); }
1428 
1429 template< typename FIRST, typename SECOND, typename CONTAINER >
1430 MpiWrapper::PairType< FIRST, SECOND > MpiWrapper::min( CONTAINER const & pairs, MPI_Comm comm )
1431 { return allReduce< FIRST, SECOND, CONTAINER, PairReduction::Min >( pairs, comm ); }
1432 
1433 template< typename FIRST, typename SECOND >
1434 MpiWrapper::PairType< FIRST, SECOND > MpiWrapper::max( PairType< FIRST, SECOND > const & pair, MPI_Comm comm )
1435 { return allReduce< FIRST, SECOND, PairReduction::Max >( pair, comm ); }
1436 
1437 template< typename FIRST, typename SECOND, typename CONTAINER >
1438 MpiWrapper::PairType< FIRST, SECOND > MpiWrapper::max( CONTAINER const & pairs, MPI_Comm comm )
1439 { return allReduce< FIRST, SECOND, CONTAINER, PairReduction::Max >( pairs, comm ); }
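// Editorial usage sketch: the pair overloads of min()/max() reduce (value, index) couples, e.g.
// to locate the global minimum and the entity owning it. The container and helper below are
// hypothetical.
//
//   std::vector< MpiWrapper::PairType< real64, globalIndex > > candidates = collectLocalCandidates();
//   auto const globalMin = MpiWrapper::min< real64, globalIndex >( candidates );
//   // globalMin.first is the smallest value over all ranks, globalMin.second its global index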
1440 
1441 } /* namespace geos */
1442 
1443 #endif /* GEOS_COMMON_MPIWRAPPER_HPP_ */