QMCPACK
Communicate.h
Go to the documentation of this file.
1 //////////////////////////////////////////////////////////////////////////////////////
2 // This file is distributed under the University of Illinois/NCSA Open Source License.
3 // See LICENSE file in top directory for details.
4 //
5 // Copyright (c) 2022 QMCPACK developers.
6 //
7 // File developed by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
8 // Miguel Morales, moralessilva2@llnl.gov, Lawrence Livermore National Laboratory
9 // Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
10 // Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
11 // Mark Dewing, markdewing@gmail.com, University of Illinois at Urbana-Champaign
12 // Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
13 // Peter Doak, doakpw@ornl.gov, Oak Ridge National Laboratory
14 // Alfredo A. Correa, correaa@llnl.gov, Lawrence Livermore National Laboratory
15 //
16 // File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
17 //////////////////////////////////////////////////////////////////////////////////////
18 
19 
20 #ifndef OHMMS_COMMUNICATE_H
21 #define OHMMS_COMMUNICATE_H
22 
23 #ifdef HAVE_CONFIG_H
24 #include "config.h"
25 #endif
26 
27 #ifdef HAVE_MPI
28 #include "mpi3/environment.hpp"
29 namespace mpi3 = boost::mpi3;
30 #endif
31 
#ifdef HAVE_MPI
/** Trait aliases mapping onto the raw MPI handle types when built with MPI. */
struct CommunicatorTraits
{
  using mpi_comm_type = MPI_Comm;
  using status        = MPI_Status;
  using request       = MPI_Request;
};

#else
/** Serial-build fallback: integer stand-ins so dependent code compiles without MPI.
 *
 *  MPI_COMM_NULL/MPI_REQUEST_NULL mimic the sentinel constants MPI would provide.
 */
struct CommunicatorTraits
{
  using mpi_comm_type = int;
  using status        = int;
  using request       = int;
  static const int MPI_COMM_NULL    = 0;
  static const int MPI_REQUEST_NULL = 1;
};
#endif
50 
51 #include <memory>
52 #include <string>
53 #include <vector>
54 #include <utility>
55 #include <unistd.h>
56 #include <cstring>
57 
58 #include "Message/AppAbort.h"
59 
60 /**@class Communicate
61  * @ingroup Message
62  * @brief
63  * Wrapping information on parallelism.
64  * Very limited in functions. Currently, only single-mode or mpi-mode
65  * is available (mutually exclusive).
66  * @todo Possibly, make it a general manager class for mpi+openmp, mpi+mpi
67  */
69 {
70 public:
71  ///constructor
72  Communicate();
73 
74 #ifdef HAVE_MPI
75  ///constructor with communicator
76  Communicate(mpi3::communicator& in_comm);
77  Communicate(mpi3::communicator&& in_comm);
78 #endif
79 
80  /** constructor that splits in_comm
81  */
82  Communicate(const Communicate& in_comm, int nparts);
83 
84  /**destructor
85  * Call proper finalization of Communication library
86  */
87  virtual ~Communicate();
88 
89  ///disable constructor
90  Communicate(const Communicate&) = delete;
91 
92  // Only for unit tests
93  void initialize(int argc, char** argv);
94 
95 #ifdef HAVE_MPI
96  void initialize(const mpi3::environment& env);
97 #endif
98  /// provide a node/shared-memory communicator from current (parent) communicator
99  Communicate NodeComm() const;
100 
101  void finalize();
102  void barrier() const;
103  void abort() const;
104  void barrier_and_abort(const std::string& msg) const;
105  void set_world();
106 
107 #if defined(HAVE_MPI)
108  ///operator for implicit conversion to MPI_Comm
109  operator MPI_Comm() const { return myMPI; }
110 #endif
111 
112  ///return the Communicator ID (typically MPI_WORLD_COMM)
113  mpi_comm_type getMPI() const { return myMPI; }
114 
115  ///return the rank
116  int rank() const { return d_mycontext; }
117  ///return the number of tasks
118  int size() const { return d_ncontexts; }
119 
120  ///return the group id
121  int getGroupID() const { return d_groupid; }
122  ///return the number of intra_comms which belong to the same group
123  int getNumGroups() const { return d_ngroups; }
124 
125  void cleanupMessage(void*);
126  void setNodeID(int i) { d_mycontext = i; }
127  void setNumNodes(int n) { d_ncontexts = n; }
128 
129  void setName(const std::string& aname) { myName = aname; }
130  void setName(const char* aname, int alen) { myName = std::string(aname, alen); }
131  const std::string& getName() const { return myName; }
132 
133  ///return true if the current MPI rank is the group lead
134  bool isGroupLeader() { return d_mycontext == 0; }
135 
136  // MMORALES: leaving this here temprarily, but it doesn;t belong here.
137  // MMORALES: FIX FIX FIX
138 #ifdef HAVE_MPI
139 
140 // For Mac OS X
141 #ifndef HOST_NAME_MAX
142 #ifdef _POSIX_HOST_NAME_MAX
143 #define HOST_NAME_MAX _POSIX_HOST_NAME_MAX
144 #endif
145 #endif
146 
147 #endif
148 
149 #ifdef HAVE_MPI
150  /** A hack to get around Communicate not supporting flexible processor subgroups
151  *
152  * MMORALES:
153  * right now there is no easy way to use Communicate
154  * for generic processor subgroups, so calling split on myMPI
155  * and managing the communicator directly
156  * \todo THIS MUST BE FIXED!!!
157  */
158  void split_comm(int key, MPI_Comm& comm)
159  {
160  int myrank = rank();
161  MPI_Comm_split(myMPI, key, myrank, &comm);
162  }
163 #endif
164 
165  template<typename T>
166  void allreduce(T&);
167  template<typename T>
168  void reduce(T&);
169  template<typename T>
170  void reduce(T* restrict, T* restrict, int n);
171  template<typename T>
172  void reduce_in_place(T* restrict, int n);
173  template<typename T>
174  void bcast(T&);
175  template<typename T>
176  void bcast(T* restrict, int n);
177  template<typename T>
178  void send(int dest, int tag, T&);
179  template<typename T>
180  void gather(T& sb, T& rb, int dest = 0);
181  template<typename T, typename IT>
182  void gatherv(T& sb, T& rb, IT& counts, IT& displ, int dest = 0);
183  template<typename T>
184  void allgather(T& sb, T& rb, int count);
185  template<typename T, typename IT>
186  void allgatherv(T& sb, T& rb, IT& counts, IT& displ);
187  template<typename T>
188  void scatter(T& sb, T& rb, int dest = 0);
189  template<typename T, typename IT>
190  void scatterv(T& sb, T& rb, IT& counts, IT& displ, int source = 0);
191  template<typename T>
192  request irecv(int source, int tag, T&);
193  template<typename T>
194  request isend(int dest, int tag, T&);
195  template<typename T>
196  request irecv(int source, int tag, T*, int n);
197  template<typename T>
198  request isend(int dest, int tag, T*, int n);
199  template<typename T, typename IT>
200  void gatherv(T* sb, T* rb, int n, IT& counts, IT& displ, int dest = 0);
201  template<typename T, typename TMPI, typename IT>
202  void gatherv_in_place(T* buf, TMPI& datatype, IT& counts, IT& displ, int dest = 0);
203  template<typename T>
204  void allgather(T* sb, T* rb, int count);
205  template<typename T>
206  void gsum(T&);
207 
208 protected:
209  /** Raw communicator
210  *
211  * Currently it is only owned by Communicate which manages its creation and destruction
212  * After switching to mpi3::communicator, myMPI is only a reference to the raw communicator owned by mpi3::communicator
213  */
215  /// Communicator name
216  std::string myName;
217  /// Rank
219  /// Size
221  /// Group ID of the current communicator in the parent communicator
223  /// Total number of groups in the parent communicator
225  /// Group Leader Communicator
226  std::unique_ptr<Communicate> GroupLeaderComm;
227 
228 public:
229  // Avoid public access to unique_ptr.
231 
232 #ifdef HAVE_MPI
233  /// mpi3 communicator wrapper
234  mutable mpi3::communicator comm;
235 #endif
236 };
237 
238 
239 namespace OHMMS
240 {
241 /** Global Communicator for a process
242  */
243 extern Communicate* Controller;
244 } // namespace OHMMS
245 
246 
247 #endif // OHMMS_COMMUNICATE_H
virtual ~Communicate()
destructor Call proper finalization of Communication library
void initialize(int argc, char **argv)
request isend(int dest, int tag, T &)
request irecv(int source, int tag, T &)
void reduce(T &)
void barrier() const
int rank() const
return the rank
Definition: Communicate.h:116
void send(int dest, int tag, T &)
void gatherv(T &sb, T &rb, IT &counts, IT &displ, int dest=0)
int d_mycontext
Rank.
Definition: Communicate.h:218
void cleanupMessage(void *)
mpi_comm_type myMPI
Raw communicator.
Definition: Communicate.h:214
Communicate * Controller
Global Communicator for a process.
Definition: Communicate.cpp:35
int size() const
return the number of tasks
Definition: Communicate.h:118
Wrapping information on parallelism.
Definition: Communicate.h:68
void allreduce(T &)
int getGroupID() const
return the group id
Definition: Communicate.h:121
const std::string & getName() const
Definition: Communicate.h:131
std::string myName
Communicator name.
Definition: Communicate.h:216
void allgather(T &sb, T &rb, int count)
int d_ncontexts
Size.
Definition: Communicate.h:220
void setName(const char *aname, int alen)
Definition: Communicate.h:130
void gather(T &sb, T &rb, int dest=0)
bool isGroupLeader()
return true if the current MPI rank is the group lead
Definition: Communicate.h:134
void gsum(T &)
void gatherv_in_place(T *buf, TMPI &datatype, IT &counts, IT &displ, int dest=0)
int d_ngroups
Total number of groups in the parent communicator.
Definition: Communicate.h:224
int d_groupid
Group ID of the current communicator in the parent communicator.
Definition: Communicate.h:222
void set_world()
void setName(const std::string &aname)
Definition: Communicate.h:129
Communicate NodeComm() const
provide a node/shared-memory communicator from current (parent) communicator
int getNumGroups() const
return the number of intra_comms which belong to the same group
Definition: Communicate.h:123
void scatter(T &sb, T &rb, int dest=0)
void abort() const
void finalize()
void setNodeID(int i)
Definition: Communicate.h:126
std::unique_ptr< Communicate > GroupLeaderComm
Group Leader Communicator.
Definition: Communicate.h:226
Communicate()
constructor
Definition: Communicate.cpp:38
void reduce_in_place(T *restrict, int n)
mpi_comm_type getMPI() const
return the Communicator ID (typically MPI_WORLD_COMM)
Definition: Communicate.h:113
Communicate * getGroupLeaderComm()
Definition: Communicate.h:230
static const int MPI_REQUEST_NULL
Definition: Communicate.h:47
void bcast(T &)
void setNumNodes(int n)
Definition: Communicate.h:127
void barrier_and_abort(const std::string &msg) const
static const int MPI_COMM_NULL
Definition: Communicate.h:46
void allgatherv(T &sb, T &rb, IT &counts, IT &displ)
void scatterv(T &sb, T &rb, IT &counts, IT &displ, int source=0)