#include "mpi3/shared_communicator.hpp"

std::vector<int> nplist(nparts + 1);
comm = in_comm.comm.split(p, in_comm.rank());
mpi3::communicator leader_comm = in_comm.comm.subcomm(nplist);

static bool has_finalized = false;

: myMPI(MPI_COMM_NULL), d_mycontext(0), d_ncontexts(1), d_groupid(0)

std::cerr << "Fatal Error. Aborting at " << msg << std::endl;
virtual ~Communicate()
destructor. Calls proper finalization of the communication library.
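The static has_finalized flag in the excerpts above suggests that finalization is guarded so it runs at most once and never after MPI has already shut down. A minimal sketch of that kind of guard, assuming the library wraps MPI_Finalize; the helper name is illustrative, not the actual implementation:

```cpp
#include <mpi.h>

// Sketch of a once-only finalization guard; `finalize_comm_library` is an
// illustrative helper name, assuming the library wraps MPI_Finalize.
void finalize_comm_library()
{
  static bool has_finalized = false; // mirrors the guard seen in the excerpts
  if (has_finalized)
    return;
  int already_finalized = 0;
  MPI_Finalized(&already_finalized);
  if (!already_finalized)
    MPI_Finalize();
  has_finalized = true;
}
```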
void initialize(int argc, char **argv)
int rank() const
return the rank of the calling process in the communicator
void cleanupMessage(void *)
mpi_comm_type myMPI
Raw communicator.
Communicate * Controller
Global Communicator for a process.
int size() const
return the number of tasks in the communicator
void FairDivideLow(int ntot, int npart, IV &adist)
partition ntot elements among npart parts as evenly as possible
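A sketch of one common way to compute such a division, assuming adist receives npart + 1 cumulative boundaries so that part i owns the index range [adist[i], adist[i+1]); the library's convention for where the remainder elements go may differ:

```cpp
#include <vector>

// Sketch: split ntot elements into npart nearly equal parts. Part sizes
// differ by at most one element; adist holds npart + 1 cumulative boundaries.
void fair_divide_low(int ntot, int npart, std::vector<int>& adist)
{
  adist.resize(npart + 1);
  const int base    = ntot / npart; // minimum elements per part
  const int residue = ntot % npart; // number of parts that get one extra
  adist[0] = 0;
  for (int i = 0; i < npart; ++i)
  {
    // Here the last `residue` parts take the extra element; the actual
    // placement of the remainder in the library may differ.
    const int extra = (i >= npart - residue) ? 1 : 0;
    adist[i + 1] = adist[i] + base + extra;
  }
}
```

For example, dividing 10 elements over 3 parts yields boundaries {0, 3, 6, 10}, i.e. parts of size 3, 3, and 4.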
Wrapping information on parallelism.
A collection of functions for dividing fairly.
bool isGroupLeader()
return true if the current MPI rank is the group leader
int d_ngroups
Total number of groups in the parent communicator.
int d_groupid
Group ID of the current communicator in the parent communicator.
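The split/subcomm excerpts near the top show how a parent communicator is divided into d_ngroups groups and how a separate communicator is formed over the group leaders. A minimal sketch of the same idea in plain MPI, assuming the group count divides the parent size evenly; parent, nparts, and the helper name are illustrative:

```cpp
#include <mpi.h>

// Sketch: split `parent` into `nparts` contiguous groups and build a
// communicator over the leader (local rank 0) of each group.
void split_into_groups(MPI_Comm parent, int nparts,
                       MPI_Comm& group_comm, MPI_Comm& leader_comm)
{
  int rank = 0, size = 0;
  MPI_Comm_rank(parent, &rank);
  MPI_Comm_size(parent, &size);

  // Assign each rank to one of nparts groups (assumes size % nparts == 0).
  const int my_group = rank / (size / nparts);
  MPI_Comm_split(parent, my_group, rank, &group_comm);

  // Leaders form their own communicator; all other ranks pass
  // MPI_UNDEFINED and receive MPI_COMM_NULL.
  int group_rank = 0;
  MPI_Comm_rank(group_comm, &group_rank);
  const int color = (group_rank == 0) ? 0 : MPI_UNDEFINED;
  MPI_Comm_split(parent, color, rank, &leader_comm);
}
```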
Communicate NodeComm() const
provide a node/shared-memory communicator derived from the current (parent) communicator
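The mpi3/shared_communicator.hpp include in the excerpts points at MPI-3 shared-memory splitting as the underlying mechanism. A minimal sketch of deriving a per-node communicator in plain MPI; the helper name is illustrative:

```cpp
#include <mpi.h>

// Sketch: derive a per-node (shared-memory) communicator from a parent
// communicator using MPI-3 shared-memory splitting.
MPI_Comm make_node_comm(MPI_Comm parent)
{
  int rank = 0;
  MPI_Comm_rank(parent, &rank);
  MPI_Comm node_comm = MPI_COMM_NULL;
  // Groups together the ranks that can share memory, i.e. ranks on one node.
  MPI_Comm_split_type(parent, MPI_COMM_TYPE_SHARED, rank,
                      MPI_INFO_NULL, &node_comm);
  return node_comm;
}
```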
std::unique_ptr< Communicate > GroupLeaderComm
Group Leader Communicator.
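GroupLeaderComm is meaningful only on ranks where isGroupLeader() returns true. A common two-level pattern that such a leader communicator enables, sketched here in plain MPI with illustrative names (group_comm is the per-group communicator; leader_comm is MPI_COMM_NULL on non-leader ranks):

```cpp
#include <mpi.h>

// Sketch: two-level reduction. Reduce within each group onto its leader,
// combine across leaders, then broadcast the result back to every rank.
double hierarchical_sum(double local, MPI_Comm group_comm, MPI_Comm leader_comm)
{
  // Step 1: reduce onto the group leader (rank 0 within the group).
  double group_sum = 0.0;
  MPI_Reduce(&local, &group_sum, 1, MPI_DOUBLE, MPI_SUM, 0, group_comm);

  // Step 2: leaders combine their partial sums among themselves.
  double total = 0.0;
  if (leader_comm != MPI_COMM_NULL)
    MPI_Allreduce(&group_sum, &total, 1, MPI_DOUBLE, MPI_SUM, leader_comm);

  // Step 3: each leader broadcasts the global result back to its group.
  MPI_Bcast(&total, 1, MPI_DOUBLE, 0, group_comm);
  return total;
}
```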
void barrier_and_abort(const std::string &msg) const
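A minimal sketch of the pattern the name and the "Fatal Error. Aborting at" excerpt suggest: synchronize the ranks, report where the failure occurred, then abort the whole communicator (assumed behavior, not the exact implementation):

```cpp
#include <mpi.h>
#include <iostream>
#include <string>

// Sketch: let all ranks reach the failure point, report it, then abort.
void barrier_and_abort(MPI_Comm comm, const std::string& msg)
{
  MPI_Barrier(comm);
  std::cerr << "Fatal Error. Aborting at " << msg << std::endl;
  MPI_Abort(comm, 1); // terminates every rank in the communicator
}
```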