QMCPACK
WalkerControlMPI Struct Reference

Class to handle walker controls with simple global sum. More...

Inheritance diagram for WalkerControlMPI
Collaboration diagram for WalkerControlMPI

Public Member Functions

 WalkerControlMPI (Communicate *comm)
 default constructor More...
 
int branch (int iter, MCWalkerConfiguration &W, FullPrecRealType trigger) override
 legacy: perform branch and swap walkers as required More...
 
void swapWalkersSimple (MCWalkerConfiguration &W)
 legacy: swap implementation More...
 
- Public Member Functions inherited from WalkerControlBase
 WalkerControlBase (Communicate *c)
 default constructor More...
 
virtual ~WalkerControlBase ()
 empty destructor to clean up the derived classes More...
 
void start ()
 start a block More...
 
void setWalkerID (MCWalkerConfiguration &walkers)
 start controller and initialize the IDs of walkers More...
 
void measureProperties (int iter)
 take averages and writes to a file More...
 
void setTrialEnergy (FullPrecRealType et)
 set the trial energy More...
 
FullPrecRealType getValue (int i)
 return a value accumulated during a block More...
 
FullPrecRealType getCurrentValue (int i)
 return a current value More...
 
int doNotBranch (int iter, MCWalkerConfiguration &W)
 legacy: return global population update properties without branching More...
 
int sortWalkers (MCWalkerConfiguration &W)
 legacy: sort Walkers between good and bad and prepare branching More...
 
int applyNmaxNmin (int current_population)
 legacy: apply per rank limit Nmax and Nmin More...
 
int copyWalkers (MCWalkerConfiguration &W)
 legacy: copy good walkers to W More...
 
virtual void reset ()
 reset to accumulate data More...
 
bool put (xmlNodePtr cur)
 
void setMinMax (int nw_in, int nmax_in)
 
int get_n_max () const
 
int get_n_min () const
 
FullPrecRealType get_target_sigma () const
 
MCDataType< FullPrecRealType > & get_ensemble_property ()
 
void set_ensemble_property (MCDataType< FullPrecRealType > &ensemble_property)
 
IndexType get_num_contexts () const
 
IndexType get_method () const
 
void set_method (IndexType method)
 
- Public Member Functions inherited from MPIObjectBase
 MPIObjectBase (Communicate *c)
 constructor with communicator More...
 
int rank () const
 return the rank of the communicator More...
 
int getGroupID () const
 return the group id of the communicator More...
 
Communicate * getCommunicator () const
 return myComm More...
 
Communicate & getCommRef () const
 return a TEMPORARY reference to Communicate More...
 
mpi_comm_type getMPI () const
 return MPI communicator if one wants to use MPI directly More...
 
bool is_manager () const
 return true if the rank == 0 More...
 
const std::string & getName () const
 return the name More...
 
void setName (const std::string &aname)
 

Static Public Member Functions

static void determineNewWalkerPopulation (int cur_pop, int num_contexts, int my_context, std::vector< int > &num_per_rank, std::vector< int > &fair_offset, std::vector< int > &minus, std::vector< int > &plus)
 creates the distribution plan More...
 
- Static Public Member Functions inherited from WalkerControlBase
static std::vector< IndexType > syncFutureWalkersPerRank (Communicate *comm, IndexType n_walkers)
 

Public Attributes

int Cur_pop
 
int Cur_max
 
int Cur_min
 
TimerList_t myTimers
 
IndexType NumWalkersSent
 
friend WalkerControlMPITest
 

Additional Inherited Members

- Public Types inherited from WalkerControlBase
enum  {
  ENERGY_INDEX = 0, ENERGY_SQ_INDEX, WALKERSIZE_INDEX, WEIGHT_INDEX,
  EREF_INDEX, R2ACCEPTED_INDEX, R2PROPOSED_INDEX, FNSIZE_INDEX,
  RNONESIZE_INDEX, RNSIZE_INDEX, B_ENERGY_INDEX, B_WGT_INDEX,
  SENTWALKERS_INDEX, LE_MAX
}
 An enum to access curData and accumData for reduction. More...
 
using Walker_t = MCWalkerConfiguration::Walker_t
 typedef of Walker_t More...
 
using FullPrecRealType = QMCTraits::FullPrecRealType
 typedef of FullPrecRealType More...
 
using IndexType = QMCTraits::IndexType
 typedef of IndexType More...
 
- Public Types inherited from MPIObjectBase
using mpi_comm_type = Communicate::mpi_comm_type
 
- Protected Attributes inherited from WalkerControlBase
IndexType method_
 id for the method More...
 
IndexType n_min_
 minimum number of walkers More...
 
IndexType n_max_
 maximum number of walkers More...
 
IndexType MaxCopy
 maximum copy per walker More...
 
IndexType NumWalkers
 current number of walkers per processor More...
 
FullPrecRealType trialEnergy
 trial energy More...
 
FullPrecRealType target_sigma_
 target sigma to limit fluctuations of the trial energy More...
 
std::vector< int > NumPerRank
 number of walkers per rank More...
 
std::vector< int > OffSet
 offset of the particle index More...
 
std::vector< int > FairOffSet
 offset of the particle index for a fair distribution More...
 
std::filesystem::path dmcFname
 filename for dmc.dat More...
 
std::unique_ptr< std::ofstream > dmcStream
 file to save energy histogram More...
 
IndexType NumWalkersCreated
 Number of walkers created by this rank. More...
 
IndexType MyContext
 context id More...
 
IndexType num_contexts_
 number of contexts More...
 
IndexType SwapMode
 0 is default More...
 
std::vector< FullPrecRealType > accumData
 any accumulated data over a block More...
 
std::vector< FullPrecRealType > curData
 any temporary data; includes many ridiculous conversions of integral types to and from fp More...
 
std::vector< std::unique_ptr< Walker_t > > good_w
 temporary storage for good and bad walkers More...
 
std::vector< std::unique_ptr< Walker_t > > bad_w
 
std::vector< int > ncopy_w
 temporary storage for copy counters More...
 
bool use_nonblocking
 Use non-blocking isend/irecv. More...
 
MCDataType< FullPrecRealType > ensemble_property_
 ensemble properties More...
 
- Protected Attributes inherited from MPIObjectBase
Communicate * myComm
 pointer to Communicate More...
 
std::string ClassName
 class Name More...
 
std::string myName
 name of the object More...
 

Detailed Description

Class to handle walker controls with simple global sum.

Base class to handle serial mode with branching only

Definition at line 30 of file WalkerControlMPI.h.
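
As a quick orientation, the following is a minimal driver-side usage sketch. It is not taken from a QMCPACK driver: the surrounding loop, the header path, the variable names, and the trigger value are illustrative assumptions; only the WalkerControlMPI member calls correspond to the documentation on this page.

// #include "QMCDrivers/DMC/WalkerControlMPI.h"   // header path assumed
void runSection(Communicate* comm, MCWalkerConfiguration& W,
                QMCTraits::FullPrecRealType trial_energy, int nsteps)
{
  WalkerControlMPI wc(comm);        // the constructor sets SwapMode = 1 and zeroes NumWalkersSent
  wc.setTrialEnergy(trial_energy);
  wc.start();                       // start a block
  for (int iter = 0; iter < nsteps; ++iter)
  {
    // ... propagate walkers and update their Weight / Multiplicity ...
    const int global_pop = wc.branch(iter, W, /*trigger=*/0.1);  // sort, allreduce, load balance, copy
    // global_pop is the new total walker population across all ranks
  }
}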

Constructor & Destructor Documentation

◆ WalkerControlMPI()

default constructor

Parameters
[in] comm cannot be null; this is not checked.

Set SwapMode? SwapMode is set to 1, but what does that mean? This object persists inside the SFNB (SimpleFixedNodeBranch), which also persists, so the zeroing here will not happen in later QMC sections... This seems problematic in that NumWalkersSent will start at a value of no relevance to the current section.

In the new drivers, SFNB should throw an exception if there is an attempted reuse of the WalkerController.

Definition at line 61 of file WalkerControlMPI.cpp.

References WalkerControlMPI::Cur_max, WalkerControlMPI::Cur_min, WalkerControlMPI::NumWalkersSent, and WalkerControlBase::SwapMode.

61 WalkerControlMPI::WalkerControlMPI(Communicate* comm)
62     : WalkerControlBase(comm), myTimers(getGlobalTimerManager(), DMCMPITimerNames, timer_level_medium)
63 {
64  NumWalkersSent = 0;
65  SwapMode = 1;
66  Cur_min = 0;
67  Cur_max = 0;
68 }

Member Function Documentation

◆ branch()

int branch ( int  iter,
MCWalkerConfiguration &  W,
FullPrecRealType  trigger 
)
override virtual

legacy: perform branch and swap walkers as required

Perform branch and swap walkers as required.

It takes 5 steps:

  1. sortWalkers marks good and bad walkers.
  2. allreduce collects the number of good walkers plus their copies on every rank (see the curData layout sketch after this list).
  3. applyNmaxNmin prevents the global population from becoming too large or too small.
  4. swapWalkersSimple decides the load-balancing plan and sends/receives walkers. The receiving side recycles the memory of bad walkers first.
  5. copyWalkers generates copies of good walkers. To minimize fluctuations in the memory footprint, walker copying is placed last. To reduce the time spent allocating walker memory, this algorithm does not destroy the bad walkers in step 1; they are recycled as much as possible in steps 3 and 4.
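
Step 2 relies on curData being a single flat buffer that carries both the accumulated properties and one walker count per rank, so one allreduce synchronizes everything. A minimal sketch of that packing, assuming hypothetical inputs num_ranks, my_rank, num_local_walkers, and walkers_sent (only the enum indices and Communicate::allreduce come from this page):

#include <vector>

std::vector<QMCTraits::FullPrecRealType> packCurData(Communicate* comm, int num_ranks, int my_rank,
                                                     int num_local_walkers, int walkers_sent)
{
  using WCB = WalkerControlBase;
  // first LE_MAX slots: weighted property sums (ENERGY_INDEX ... SENTWALKERS_INDEX)
  // remaining num_ranks slots: walker count reported by each rank
  std::vector<QMCTraits::FullPrecRealType> curData(WCB::LE_MAX + num_ranks, 0.0);
  curData[WCB::SENTWALKERS_INDEX] += walkers_sent;      // walkers sent in the previous exchange
  curData[WCB::LE_MAX + my_rank] = num_local_walkers;   // implicit int -> FullPrecRealType
  comm->allreduce(curData);  // one global sum delivers properties and all per-rank counts
  return curData;
}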

Reimplemented from WalkerControlBase.

Definition at line 85 of file WalkerControlMPI.cpp.

References Communicate::allreduce(), WalkerControlBase::applyNmaxNmin(), WalkerControlBase::copyWalkers(), WalkerControlMPI::Cur_pop, WalkerControlBase::curData, qmcplusplus::DMC_MPI_allreduce, qmcplusplus::DMC_MPI_branch, qmcplusplus::DMC_MPI_copyWalkers, qmcplusplus::DMC_MPI_loadbalance, qmcplusplus::DMC_MPI_prebalance, WalkerControlBase::ensemble_property_, WalkerConfigurations::EnsembleProperty, WalkerControlBase::FairOffSet, WalkerControlBase::LE_MAX, WalkerControlBase::measureProperties(), MPIObjectBase::myComm, WalkerControlBase::MyContext, WalkerControlMPI::myTimers, WalkerControlBase::num_contexts_, WalkerControlBase::NumPerRank, WalkerControlBase::NumWalkers, WalkerControlMPI::NumWalkersSent, WalkerControlBase::SENTWALKERS_INDEX, WalkerControlBase::sortWalkers(), WalkerControlMPI::swapWalkersSimple(), and qmcplusplus::walker.

86 {
87  ScopedTimer local_timer(myTimers[DMC_MPI_branch]);
88  {
89  ScopedTimer local_timer(myTimers[DMC_MPI_prebalance]);
90  std::fill(curData.begin(), curData.end(), 0.0);
91  sortWalkers(W);
92  //use NumWalkersSent from the previous exchange
93  curData[SENTWALKERS_INDEX] += NumWalkersSent;
94  //update the number of walkers for this rank
95  //Causes implicit conversion to FullPrecRealType
96  curData[LE_MAX + MyContext] = NumWalkers;
97  //{ ScopedTimer local_timer(myTimers[DMC_MPI_imbalance]);
98  //}
99  {
100  ScopedTimer local_timer(myTimers[DMC_MPI_allreduce]);
101  myComm->allreduce(curData);
102  }
103  measureProperties(iter);
104  W.EnsembleProperty = ensemble_property_;
105  for (int i = 0, j = LE_MAX; i < num_contexts_; i++, j++)
106  NumPerRank[i] = static_cast<int>(curData[j]);
107  int current_population = std::accumulate(NumPerRank.begin(), NumPerRank.end(), 0);
108 
109  Cur_pop = applyNmaxNmin(current_population);
110  }
111  {
112  ScopedTimer local_timer(myTimers[DMC_MPI_loadbalance]);
113  swapWalkersSimple(W);
114  }
115  {
116  ScopedTimer local_timer(myTimers[DMC_MPI_copyWalkers]);
117  copyWalkers(W);
118  }
119  //set Weight and Multiplicity to default values
120  for (auto& walker : W)
121  {
122  walker->Weight = 1.0;
123  walker->Multiplicity = 1.0;
124  }
125  //update the walkers offsets
126  W.setWalkerOffsets(FairOffSet);
127 
128  return Cur_pop;
129 }

◆ determineNewWalkerPopulation()

void determineNewWalkerPopulation ( int  cur_pop,
int  num_contexts,
int  my_context,
std::vector< int > &  num_per_rank,
std::vector< int > &  fair_offset,
std::vector< int > &  minus,
std::vector< int > &  plus 
)
static

creates the distribution plan

Populates the minus and plus vectors; they contain one copy of a partition index for each adjustment in population to that context (a worked example follows the parameter list below).

Todo:
fix this argument salad
Parameters
[in] cur_pop population taking multiplicity into account
[in] num_contexts number of MPI processes
[in] my_context i.e. this process's MPI rank
[in,out] num_per_rank number of walkers per rank as if all walkers were copied out to multiplicity
[out] fair_offset running population count at each partition boundary
[out] minus list of partition indexes, one occurrence for each walker to be removed
[out] plus list of partition indexes, one occurrence for each walker to be added
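
For concreteness, a small worked example with assumed numbers (12 walkers over 3 ranks; nothing here comes from an actual run). The expected results in the comments follow from FairDivideLow and the loop in the listing below.

#include <vector>

void examplePlan()
{
  std::vector<int> num_per_rank{6, 3, 3};   // walker counts after branching (assumed)
  std::vector<int> fair_offset, minus, plus;
  WalkerControlMPI::determineNewWalkerPopulation(/*cur_pop=*/12, /*num_contexts=*/3, /*my_context=*/0,
                                                 num_per_rank, fair_offset, minus, plus);
  // FairDivideLow(12, 3, fair_offset) yields fair_offset = {0, 4, 8, 12},
  // i.e. a fair target of 4 walkers on every rank.
  // Rank 0 is 2 over target and ranks 1 and 2 are each 1 under, so
  //   plus  = {0, 0}   (one entry per walker its rank must send)
  //   minus = {1, 2}   (one entry per walker its rank must receive)
  // and num_per_rank is overwritten with the fair counts {4, 4, 4}.
  // Entry i of plus pairs with entry i of minus to define one transfer.
}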

Definition at line 132 of file WalkerControlMPI.cpp.

References qmcplusplus::app_error(), and FairDivideLow().

Referenced by WalkerControlMPI::swapWalkersSimple(), and qmcplusplus::TEST_CASE().

139 {
140  FairDivideLow(cur_pop, num_contexts, fair_offset);
141  for (int ip = 0; ip < num_contexts; ip++)
142  {
143  // (FairOffSet[ip + 1] - FairOffSet[ip]) gives the partition ip walker pop
144  int dn = num_per_rank[ip] - (fair_offset[ip + 1] - fair_offset[ip]);
145  num_per_rank[ip] -= dn;
146  if (dn > 0)
147  {
148  plus.insert(plus.end(), dn, ip);
149  }
150  else if (dn < 0)
151  {
152  minus.insert(minus.end(), -dn, ip);
153  }
154  }
155 #ifndef NDEBUG
156  if (plus.size() != minus.size())
157  {
158  app_error() << "Walker send/recv pattern doesn't match. "
159  << "The send size " << plus.size() << " is not equal to the recv size " << minus.size() << " ."
160  << std::endl;
161  throw std::runtime_error("Trying to swap in WalkerControlMPI::swapWalkersSimple with mismatched queues");
162  }
163 #endif
164 }

◆ swapWalkersSimple()

void swapWalkersSimple ( MCWalkerConfiguration &  W )

legacy: swap implementation

swap Walkers with Recv/Send or Irecv/Isend

The algorithm ensures that the load per rank can differ by at most one walker. Each MPI rank either sends, receives, or stays silent. The communication is one-dimensional and very local. If multiple copies of a walker need to go to the same target rank, only one walker is sent; the number of copies is communicated ahead via blocking send/recv. The walkers themselves are then transferred via blocking or non-blocking send/recv. The blocking send/recv may become serialized and worsen load imbalance; the non-blocking send/recv algorithm avoids this serialization completely.
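
As a complement to the listing below, here is a standalone sketch (not part of WalkerControlMPI; the function name and printf output are illustrative) of how the paired plus/minus lists produced by determineNewWalkerPopulation translate into the point-to-point pattern described above:

#include <cstdio>
#include <vector>

// Entry i of `plus` is the sending rank and entry i of `minus` the receiving rank
// for one walker transfer. A rank with surplus walkers appears only in `plus` and a
// rank with a deficit only in `minus`, so each rank either sends, receives, or stays
// silent. The real implementation additionally coalesces consecutive identical
// (plus, minus) pairs into a single message that carries a copy count (nsentcopy).
void printSwapPattern(const std::vector<int>& plus, const std::vector<int>& minus, int my_rank)
{
  for (std::size_t i = 0; i < plus.size(); ++i)
  {
    if (plus[i] == my_rank)
      std::printf("rank %d sends transfer %zu to rank %d\n", my_rank, i, minus[i]);
    else if (minus[i] == my_rank)
      std::printf("rank %d receives transfer %zu from rank %d\n", my_rank, i, plus[i]);
  }
}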

Definition at line 177 of file WalkerControlMPI.cpp.

References APP_ABORT, qmcplusplus::app_error(), WalkerControlBase::bad_w, WalkerControlMPI::Cur_pop, WalkerControlMPI::determineNewWalkerPopulation(), qmcplusplus::DMC_MPI_recv, qmcplusplus::DMC_MPI_send, WalkerControlBase::FairOffSet, WalkerControlBase::good_w, MPIObjectBase::myComm, WalkerControlBase::MyContext, WalkerControlMPI::myTimers, WalkerControlBase::ncopy_w, WalkerControlBase::num_contexts_, WalkerControlBase::NumPerRank, WalkerControlMPI::NumWalkersSent, and WalkerControlBase::use_nonblocking.

Referenced by WalkerControlMPI::branch().

178 {
179  std::vector<int> minus, plus;
180  //legacy code does not modify NumPerRank in this call so we copy NumPerRank
181  std::vector<int> num_per_rank(NumPerRank);
182  determineNewWalkerPopulation(Cur_pop, num_contexts_, MyContext, num_per_rank, FairOffSet, minus, plus);
183 
184  if (good_w.empty() && bad_w.empty())
185  {
186  app_error() << "It should never happen that no walkers, "
187  << "neither good nor bad, exist on a rank. "
188  << "Please report to developers. " << std::endl;
189  APP_ABORT("WalkerControlMPI::swapWalkersSimple no existing walker");
190  }
191 
192  Walker_t& wRef(*(good_w.empty() ? bad_w[0] : good_w[0]));
193  std::vector<std::unique_ptr<Walker_t>> newW;
194  std::vector<int> ncopy_newW;
195 #ifdef MCWALKERSET_MPI_DEBUG
196  std::array<char, 128> fname;
197  if (std::snprintf(fname.data(), fname.size(), "test.%d", MyContext) < 0)
198  throw std::runtime_error("Error generating filename");
199  std::ofstream fout(fname.data(), std::ios::app);
200  //fout << NumSwaps << " " << Cur_pop << " ";
201  //for(int ic=0; ic<NumContexts; ic++) fout << NumPerRank[ic] << " ";
202  //fout << " | ";
203  //for(int ic=0; ic<NumContexts; ic++) fout << FairOffSet[ic+1]-FairOffSet[ic] << " ";
204  //fout << " | ";
205  for (int ic = 0; ic < plus.size(); ic++)
206  {
207  fout << plus[ic] << " ";
208  }
209  fout << " | ";
210  for (int ic = 0; ic < minus.size(); ic++)
211  {
212  fout << minus[ic] << " ";
213  }
214  fout << std::endl;
215 #endif
216  int nswap = plus.size();
217  // sort good walkers by the number of copies
218  assert(good_w.size() == ncopy_w.size());
219  std::vector<std::pair<int, int>> ncopy_pairs;
220  for (int iw = 0; iw < ncopy_w.size(); iw++)
221  ncopy_pairs.push_back(std::make_pair(ncopy_w[iw], iw));
222  std::sort(ncopy_pairs.begin(), ncopy_pairs.end());
223 
224  int nsend = 0;
225  struct job
226  {
227  const int walkerID;
228  const int target;
229  job(int wid, int target_in) : walkerID(wid), target(target_in){};
230  };
231  std::vector<job> job_list;
232  for (int ic = 0; ic < nswap; ic++)
233  {
234  if (plus[ic] == MyContext)
235  {
236  // always send the last good walker
237  auto& awalker = good_w[ncopy_pairs.back().second];
238  // count the possible copies in one send
239  int nsentcopy = 0;
240 
241  for (int id = ic + 1; id < nswap; id++)
242  if (plus[ic] == plus[id] && minus[ic] == minus[id] && ncopy_pairs.back().first > 0)
243  { // increment copy counter
244  ncopy_pairs.back().first--;
245  nsentcopy++;
246  }
247  else
248  { // not enough copies to send or not the same send/recv pair
249  break;
250  }
251 
252  // send the number of copies to the target
253  myComm->comm.send_value(nsentcopy, minus[ic]);
254  job_list.push_back(job(ncopy_pairs.back().second, minus[ic]));
255 #ifdef MCWALKERSET_MPI_DEBUG
256  fout << "rank " << plus[ic] << " sends a walker with " << nsentcopy << " copies to rank " << minus[ic]
257  << std::endl;
258 #endif
259 
260  // update counter and cursor
261  ++nsend;
262  ic += nsentcopy;
263 
264  // update copy counter
265  if (ncopy_pairs.back().first > 0)
266  {
267  ncopy_pairs.back().first--;
268  std::sort(ncopy_pairs.begin(), ncopy_pairs.end());
269  }
270  else
271  {
272  ncopy_pairs.pop_back();
273  bad_w.push_back(std::make_unique<Walker_t>(*awalker));
274  }
275  }
276  if (minus[ic] == MyContext)
277  {
278  std::unique_ptr<Walker_t> awalker;
279  if (!bad_w.empty())
280  {
281  awalker = std::move(bad_w.back());
282  bad_w.pop_back();
283  }
284 
285  int nsentcopy = 0;
286  // recv the number of copies from the target
287  myComm->comm.receive_n(&nsentcopy, 1, plus[ic]);
288  job_list.push_back(job(newW.size(), plus[ic]));
289  if (plus[ic] != plus[ic + nsentcopy] || minus[ic] != minus[ic + nsentcopy])
290  APP_ABORT("WalkerControlMPI::swapWalkersSimple send/recv pair checking failed!");
291 #ifdef MCWALKERSET_MPI_DEBUG
292  fout << "rank " << minus[ic] << " recvs a walker with " << nsentcopy << " copies from rank " << plus[ic]
293  << std::endl;
294 #endif
295 
296  // save the new walker
297  if (awalker)
298  {
299  newW.push_back(std::make_unique<Walker_t>(*awalker));
300  }
301  else
302  {
303  newW.push_back(nullptr);
304  }
305  ncopy_newW.push_back(nsentcopy);
306  // update cursor
307  ic += nsentcopy;
308  }
309  }
310 
311  if (nsend > 0)
312  {
313  std::vector<mpi3::request> requests;
314  // mark all walkers not in send
315  for (auto jobit = job_list.begin(); jobit != job_list.end(); jobit++)
316  good_w[jobit->walkerID]->SendInProgress = false;
317  for (auto jobit = job_list.begin(); jobit != job_list.end(); jobit++)
318  {
319  // pack data and send
320  auto& awalker = good_w[jobit->walkerID];
321  size_t byteSize = awalker->byteSize();
322  if (!awalker->SendInProgress)
323  {
324  awalker->updateBuffer();
325  awalker->SendInProgress = true;
326  }
327  if (use_nonblocking)
328  requests.push_back(myComm->comm.isend_n(awalker->DataSet.data(), byteSize, jobit->target));
329  else
330  {
331  ScopedTimer local_timer(myTimers[DMC_MPI_send]);
332  myComm->comm.send_n(awalker->DataSet.data(), byteSize, jobit->target);
333  }
334  }
335  if (use_nonblocking)
336  {
337  // wait all the isend
338  for (int im = 0; im < requests.size(); im++)
339  {
340  ScopedTimer local_timer(myTimers[DMC_MPI_send]);
341  requests[im].wait();
342  }
343  requests.clear();
344  }
345  }
346  else
347  {
348  std::vector<mpi3::request> requests;
349  for (auto jobit = job_list.begin(); jobit != job_list.end(); jobit++)
350  {
351  // recv and unpack data
352  auto& awalker = newW[jobit->walkerID];
353  if (!awalker)
354  awalker = std::make_unique<Walker_t>(wRef);
355  size_t byteSize = awalker->byteSize();
356  if (use_nonblocking)
357  requests.push_back(myComm->comm.ireceive_n(awalker->DataSet.data(), byteSize, jobit->target));
358  else
359  {
360  ScopedTimer local_timer(myTimers[DMC_MPI_recv]);
361  myComm->comm.receive_n(awalker->DataSet.data(), byteSize, jobit->target);
362  awalker->copyFromBuffer();
363  }
364  }
365  if (use_nonblocking)
366  {
367  std::vector<bool> not_completed(requests.size(), true);
368  bool completed = false;
369  while (!completed)
370  {
371  completed = true;
372  for (int im = 0; im < requests.size(); im++)
373  if (not_completed[im])
374  {
375  if (requests[im].completed())
376  {
377  newW[job_list[im].walkerID]->copyFromBuffer();
378  not_completed[im] = false;
379  }
380  else
381  completed = false;
382  }
383  }
384  requests.clear();
385  }
386  }
387  //save the number of walkers sent
388  NumWalkersSent = nsend;
389  // rebuild good_w and ncopy_w
390  std::vector<std::unique_ptr<Walker_t>> good_w_temp(std::move(good_w));
391  good_w.resize(ncopy_pairs.size());
392  ncopy_w.resize(ncopy_pairs.size());
393  for (int iw = 0; iw < ncopy_pairs.size(); iw++)
394  {
395  good_w[iw] = std::move(good_w_temp[ncopy_pairs[iw].second]);
396  ncopy_w[iw] = ncopy_pairs[iw].first;
397  }
398  //add walkers from other rank
399  if (newW.size())
400  {
401  good_w.insert(good_w.end(), std::make_move_iterator(newW.begin()), std::make_move_iterator(newW.end()));
402  ncopy_w.insert(ncopy_w.end(), ncopy_newW.begin(), ncopy_newW.end());
403  }
404 
405  assert(std::accumulate(ncopy_w.begin(), ncopy_w.end(), ncopy_w.size()) == num_per_rank[MyContext]);
406 }

Member Data Documentation

◆ Cur_max

int Cur_max

Definition at line 33 of file WalkerControlMPI.h.

Referenced by WalkerControlMPI::WalkerControlMPI().

◆ Cur_min

int Cur_min

Definition at line 34 of file WalkerControlMPI.h.

Referenced by WalkerControlMPI::WalkerControlMPI().

◆ Cur_pop

int Cur_pop

◆ myTimers

TimerList_t myTimers

◆ NumWalkersSent

◆ WalkerControlMPITest

friend WalkerControlMPITest

Definition at line 77 of file WalkerControlMPI.h.


The documentation for this struct was generated from the following files: