QMCPACK
WalkerControl.cpp
Go to the documentation of this file.
1 //////////////////////////////////////////////////////////////////////////////////////
2 // This file is distributed under the University of Illinois/NCSA Open Source License.
3 // See LICENSE file in top directory for details.
4 //
5 // Copyright (c) 2020 QMCPACK developers.
6 //
7 // File developed by: Peter Doak, doakpw@ornl.gov, Oak Ridge National Laboratory
8 // Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
9 // Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
10 // Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
11 // Ye Luo, yeluo@anl.gov, Argonne National Laboratory
12 // Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
13 //
14 // File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
15 //////////////////////////////////////////////////////////////////////////////////////
16 
17 
18 #include <array>
19 #include <cassert>
20 #include <stdexcept>
21 #include <numeric>
22 #include <sstream>
23 
24 #include "WalkerControl.h"
26 #include "OhmmsData/ParameterSet.h"
29 
30 namespace qmcplusplus
31 {
33 
35 {
45 };
46 
48  {WC_imbalance, "WalkerControl::imbalance"},
49  {WC_prebalance, "WalkerControl::pre-loadbalance"},
50  {WC_copyWalkers, "WalkerControl::copyWalkers"},
51  {WC_recomputing, "WalkerControl::recomputing"},
52  {WC_allreduce, "WalkerControl::allreduce"},
53  {WC_loadbalance, "WalkerControl::loadbalance"},
54  {WC_send, "WalkerControl::send"},
55  {WC_recv, "WalkerControl::recv"}};
56 
// WalkerControl constructor (signature on the preceding line, not shown here):
// records the communicator geometry (rank id / rank count), the RNG used for
// stochastic rounding of walker multiplicities, and the fixed- vs dynamic-
// population flag.  n_min_/n_max_/max_copy_ start at defaults and are normally
// overridden later via put()/setMinMax().
58  : MPIObjectBase(c),
59  rng_(rng),
60  use_fixed_pop_(use_fixed_pop),
61  n_min_(1),
62  n_max_(10),
63  max_copy_(2),
64  rank_num_(c->rank()),
65  num_ranks_(c->size()),
66  SwapMode(0),
67  use_nonblocking_(true),
68  debug_disable_branching_(false),
70  saved_num_walkers_sent_(0)
71 {
 // one slot per MPI rank; fair_offset_ holds partition boundaries, hence +1
72  num_per_rank_.resize(num_ranks_);
73  fair_offset_.resize(num_ranks_ + 1);
74 }
75 
77 
// Open (on rank 0 only) the "<name>.dmc.dat" output file and write its
// fixed-width column-header line.  Re-opening is skipped when the derived name
// matches the already-open dmcFname, so repeated calls are harmless.
79 {
80  if (rank_num_ == 0)
81  {
 // the file name is derived from the communicator/project name
82  std::filesystem::path hname(myComm->getName());
83  hname.concat(".dmc.dat");
84  if (hname != dmcFname)
85  {
86  dmcStream = std::make_unique<std::ofstream>(hname);
87  dmcStream->setf(std::ios::scientific, std::ios::floatfield);
88  dmcStream->precision(10);
 // header columns must stay in sync with the row written in writeDMCdat()
89  (*dmcStream) << "# Index " << std::setw(20) << "LocalEnergy" << std::setw(20) << "Variance" << std::setw(20)
90  << "Weight" << std::setw(20) << "NumOfWalkers" << std::setw(20)
91  << "AvgSentWalkers"; //add the number of walkers
92  (*dmcStream) << std::setw(20) << "TrialEnergy" << std::setw(20) << "DiffEff";
93  (*dmcStream) << std::setw(20) << "LivingFraction";
94  (*dmcStream) << std::endl;
95  dmcFname = std::move(hname);
96  }
97  }
98 }
99 
// Update ensemble_property_ from the globally reduced curData and append one
// fixed-width statistics row to the .dmc.dat stream.  Only ranks where
// dmcStream was opened (rank 0, see the file-opening routine above) write.
100 void WalkerControl::writeDMCdat(int iter, const std::vector<FullPrecRealType>& curData)
101 {
102  //taking average over the walkers
103  FullPrecRealType wgtInv(1.0 / curData[WEIGHT_INDEX]);
104  FullPrecRealType eavg = curData[ENERGY_INDEX] * wgtInv;
105  ensemble_property_.Energy = eavg;
107  ensemble_property_.Variance = (curData[ENERGY_SQ_INDEX] * wgtInv - eavg * eavg);
 // good walkers / all walkers -- presumably assigned to LivingFraction on a
 // preceding line not visible here; confirm against the full source
112  static_cast<FullPrecRealType>(curData[FNSIZE_INDEX]) / static_cast<FullPrecRealType>(curData[WALKERSIZE_INDEX]);
114  // \\todo If WalkerControl is not exclusively for dmc then this shouldn't be here.
115  // If it is it shouldn't be in QMDrivers but QMCDrivers/DMC
116  if (dmcStream)
117  {
118  //boost::archive::text_oarchive oa(*dmcStream);
119  //(*oa) & iter & eavg_cur & wgt_cur & Etrial & pop_old;
 // columns: Index, LocalEnergy, Variance, Weight, NumOfWalkers, AvgSentWalkers
120  (*dmcStream) << std::setw(10) << iter << std::setw(20) << ensemble_property_.Energy << std::setw(20)
121  << ensemble_property_.Variance << std::setw(20) << ensemble_property_.Weight << std::setw(20)
122  << ensemble_property_.NumSamples << std::setw(20)
123  << curData[SENTWALKERS_INDEX] / static_cast<double>(num_ranks_);
124  (*dmcStream) << std::setw(20) << trial_energy_ << std::setw(20)
126  (*dmcStream) << std::setw(20) << ensemble_property_.LivingFraction;
127  // Work around for bug with deterministic scalar trace test on select compiler/architectures.
128  // While WalkerControl appears to have exclusive ownership of the dmcStream pointer,
129  // this is not actually true. Apparently it doesn't actually and can lose ownership then it is
130  // either leaked or not flushed before it is destroyed.
131  // \todo fix this, you don't want to flush every step since you really hope that could be very rapid.
132  (*dmcStream)
133  << std::endl; //'\n'; // this is definitely not a place to put an endl as that is also a signal for a flush.
134  }
135 }
136 
// Unified branching + MPI load-balancing driver; see the inline algorithm
// sketch below.  Updates every walker's Multiplicity/Weight, migrates walkers
// between ranks, and refreshes the population's global walker count.
137 void WalkerControl::branch(int iter, MCPopulation& pop, bool do_not_branch)
138 {
 // NOTE(review): this assignment is presumably guarded by an elided condition
 // on the preceding line (e.g. iter == 0 and/or debug_disable_branching_) --
 // confirm against the full source.
140  do_not_branch = true;
141  /* dynamic population
142  1. compute multiplicity. If iter 0, multiplicity = 1
143  2. compute curData, collect multiplicity on every rank
144 
145  fix population
146  1. compute curData, collect weight on every rank
147  2. compute multiplicity by comb method
148 
149  3. figure out final distribution, apply walker count ceiling
150  4. collect good, bad walkers
151  5. communicate walkers
152  6. unpack received walkers, apply walker count floor
153  */
154 
155  ScopedTimer branch_timer(my_timers_[WC_branch]);
156  auto& walkers = pop.get_walkers();
157 
158  {
159  ScopedTimer prebalance_timer(my_timers_[WC_prebalance]);
160  ///any temporary data includes many ridiculous conversions of integral types to and from fp
161  std::vector<FullPrecRealType> curData(LE_MAX + num_ranks_, 0.0);
162 
163  if (use_fixed_pop_)
164  {
166  // convert node local num of walkers after combing
167  // curData[LE_MAX + rank_num_] = wsum to num_total_copies
168  // calculate walker->Multiplicity;
169  }
170  else
171  {
172  // no branching at the first iteration to avoid large population change.
173  if (do_not_branch)
174  for (auto& walker : walkers)
175  walker->Multiplicity = 1.0;
176  else
 // stochastic rounding: floor(weight + uniform) gives an integer copy
 // count whose expectation equals the walker weight
177  for (auto& walker : walkers)
178  walker->Multiplicity = static_cast<int>(walker->Weight + rng_());
 // unpack the per-rank walker counts gathered into the tail of curData
180  for (int i = 0, j = LE_MAX; i < num_ranks_; i++, j++)
181  num_per_rank_[i] = static_cast<int>(curData[j]);
182  }
183  // at this point, curData[LE_MAX + rank_num_] and walker->Multiplicity are ready.
184 
185  writeDMCdat(iter, curData);
187  }
188 
 // walkers before this index survive the whole step without being overwritten
189  auto untouched_walkers = walkers.size();
190 #if defined(HAVE_MPI)
191  {
192  ScopedTimer loadbalance_timer(my_timers_[WC_loadbalance]);
193  // kill walkers, actually put them in deadlist for be recycled for receiving walkers
195  // ranks receiving walkers from other ranks have the lowest walker count now.
196  untouched_walkers = std::min(untouched_walkers, walkers.size());
197 
198  // load balancing over MPI
199  swapWalkersSimple(pop);
200  }
201 #endif
202 
203  // kill dead walker to be recycled by the following copy
205  // ranks sending walkers from other ranks have the lowest walker count now.
206  untouched_walkers = std::min(untouched_walkers, walkers.size());
207 
208  { // copy good walkers
209  ScopedTimer copywalkers_timer(my_timers_[WC_copyWalkers]);
210  const size_t good_walkers = walkers.size();
211  for (size_t iw = 0; iw < good_walkers; iw++)
212  {
213  size_t num_copies = static_cast<int>(walkers[iw]->Multiplicity);
214  while (num_copies > 1)
215  {
216  auto walker_elements = pop.spawnWalker();
217  // save this walkers ID
218  // \todo revisit Walker assignment operator after legacy drivers removed.
219  // but in the modern scheme walker IDs are permanent after creation, what walker they
220  // were copied from is in ParentID.
221  long save_id = walker_elements.walker.getWalkerID();
222  walker_elements.walker = *walkers[iw];
223  walker_elements.walker.setParentID(walker_elements.walker.getWalkerID());
224  walker_elements.walker.setWalkerID(save_id);
225  num_copies--;
226  }
227  }
228  }
229 
230  const int current_num_global_walkers = std::accumulate(num_per_rank_.begin(), num_per_rank_.end(), 0);
231  pop.set_num_global_walkers(current_num_global_walkers);
232 #ifndef NDEBUG
233  pop.checkIntegrity();
235  if (current_num_global_walkers != pop.get_num_global_walkers())
236  throw std::runtime_error("Potential bug! Population num_global_walkers mismatched!");
237 #endif
238 
 // after a real branching step every survivor restarts with unit weight
239  if (!do_not_branch)
240  for (UPtr<MCPWalker>& walker : pop.get_walkers())
241  {
242  walker->Weight = 1.0;
243  walker->Multiplicity = 1.0;
244  }
245 
246  for (int iw = 0; iw < untouched_walkers; iw++)
247  pop.get_walkers()[iw]->wasTouched = false;
248 
249  for (int iw = untouched_walkers; iw < pop.get_num_local_walkers(); iw++)
250  pop.get_walkers()[iw]->wasTouched = true;
251 }
252 
// Accumulate this rank's walker statistics (weighted energy sums, r2 sums,
// walker counts) into curData, placing the rank-local population figure at
// slot LE_MAX + rank_num_ so every rank's count survives the reduction.
253 void WalkerControl::computeCurData(const UPtrVector<MCPWalker>& walkers, std::vector<FullPrecRealType>& curData)
254 {
255  FullPrecRealType esum = 0.0, e2sum = 0.0, wsum = 0.0;
256  FullPrecRealType r2_accepted = 0.0, r2_proposed = 0.0;
257  int num_good_walkers(0), num_total_copies(0);
258  for (const auto& walker : walkers)
259  {
 // a walker with integer multiplicity 0 is dead; >0 counts as "good"
260  const int num_copies = static_cast<int>(walker->Multiplicity);
261  num_good_walkers += num_copies > 0 ? 1 : 0;
262  num_total_copies += num_copies;
263  // Ye : not sure about these r2
264  r2_accepted += walker->Properties(WP::R2ACCEPTED);
265  r2_proposed += walker->Properties(WP::R2PROPOSED);
266  FullPrecRealType e = walker->Properties(WP::LOCALENERGY);
267  FullPrecRealType wgt = walker->Weight;
268  esum += wgt * e;
269  e2sum += wgt * e * e;
270  wsum += wgt;
271  }
272  //temp is an array to perform reduction operations
273  std::fill(curData.begin(), curData.end(), 0.0);
274  curData[ENERGY_INDEX] = esum;
275  curData[ENERGY_SQ_INDEX] = e2sum;
276  curData[WALKERSIZE_INDEX] = walkers.size(); // num of all the current walkers (good+bad)
277  curData[WEIGHT_INDEX] = wsum;
278  curData[R2ACCEPTED_INDEX] = r2_accepted;
279  curData[R2PROPOSED_INDEX] = r2_proposed;
280  curData[FNSIZE_INDEX] = num_good_walkers; // num of good walkers before branching
282  if (use_fixed_pop_)
283  curData[LE_MAX + rank_num_] = wsum; // node sum of walker weights
284  else
285  curData[LE_MAX + rank_num_] = num_total_copies; // node num of walkers after local branching
286 
287  {
 // NOTE(review): this timed scope presumably performs the MPI allreduce over
 // curData (the call itself is on an elided line) -- confirm upstream.
288  ScopedTimer allreduce_timer(my_timers_[WC_allreduce]);
290  }
291 }
292 
293 // determine new walker population on each node
294 void WalkerControl::determineNewWalkerPopulation(const std::vector<int>& num_per_rank,
295  std::vector<int>& fair_offset,
296  std::vector<int>& minus,
297  std::vector<int>& plus)
298 {
299  const int num_contexts = num_per_rank.size();
300  const int current_population = std::accumulate(num_per_rank.begin(), num_per_rank.end(), 0);
301  FairDivideLow(current_population, num_contexts, fair_offset);
302  for (int ip = 0; ip < num_contexts; ip++)
303  {
304  int dn = num_per_rank[ip] - (fair_offset[ip + 1] - fair_offset[ip]);
305  if (dn > 0)
306  plus.insert(plus.end(), dn, ip);
307  else if (dn < 0)
308  minus.insert(minus.end(), -dn, ip);
309  }
310 #ifndef NDEBUG
311  if (plus.size() != minus.size())
312  {
313  app_error() << "Walker send/recv pattern doesn't match. "
314  << "The send size " << plus.size() << " is not equal to the recv size " << minus.size() << " ."
315  << std::endl;
316  throw std::runtime_error("Trying to swap in WalkerControl::swapWalkersSimple with mismatched queues");
317  }
318 #endif
319 }
320 
321 #if defined(HAVE_MPI)
// Migrate walkers between MPI ranks according to the fair-partition plan.
// Senders pick their highest-multiplicity walker, collapse consecutive
// identical send/recv pairs into one transfer with a copy count, then move the
// serialized walker buffers (blocking or nonblocking per use_nonblocking_).
// Finally Multiplicity is rebuilt on both sides so the per-rank totals match
// fair_offset_.
322 void WalkerControl::swapWalkersSimple(MCPopulation& pop)
323 {
 // NOTE(review): minus/plus are expected to be filled with the recv/send plan
 // (cf. determineNewWalkerPopulation) on an elided line -- confirm upstream.
324  std::vector<int> minus, plus;
326 
327 #ifdef MCWALKERSET_MPI_DEBUG
328  std::array<char, 128> fname;
329  if (std::snprintf(fname.data(), fname.size(), "test.%d", rank_num_) < 0)
330  throw std::runtime_error("Error generating filename");
331  std::ofstream fout(fname.data(), std::ios::app);
332 
333  for (int ic = 0; ic < plus.size(); ic++)
334  {
335  fout << plus[ic] << " ";
336  }
337  fout << " | ";
338  for (int ic = 0; ic < minus.size(); ic++)
339  {
340  fout << minus[ic] << " ";
341  }
342  fout << std::endl;
343 #endif
344 
345  auto& good_walkers = pop.get_walkers();
346  const int nswap = plus.size();
347  // sort good walkers by the number of copies
348  std::vector<std::pair<int, int>> ncopy_pairs;
349  for (int iw = 0; iw < good_walkers.size(); iw++)
350  ncopy_pairs.push_back(std::make_pair(static_cast<int>(good_walkers[iw]->Multiplicity), iw));
351  std::sort(ncopy_pairs.begin(), ncopy_pairs.end());
352 
 // one entry per transfer: local walker index (sender) or newW index (receiver)
 // paired with the remote rank
353  struct job
354  {
355  const int walkerID;
356  const int target;
357  job(int wid, int target_in) : walkerID(wid), target(target_in){};
358  };
359 
360  int nsend = 0;
361  std::vector<job> job_list;
362  std::vector<WalkerElementsRef> newW;
363  std::vector<int> ncopy_newW;
364 
365  for (int ic = 0; ic < nswap; ic++)
366  {
367  int nsentcopy = 0;
368  if (plus[ic] == rank_num_)
369  {
370  // always send the last good walker with most copies
371  // count the possible copies in one send
372  for (int id = ic + 1; id < nswap; id++)
373  if (plus[ic] == plus[id] && minus[ic] == minus[id] && ncopy_pairs.back().first > 1)
374  { // increment copy counter
375  ncopy_pairs.back().first--;
376  nsentcopy++;
377  }
378  else
379  { // not enough copies to send or not the same send/recv pair
380  break;
381  }
382 
383  // send the number of copies to the target
384  myComm->comm.send_value(nsentcopy, minus[ic]);
385  job_list.push_back(job(ncopy_pairs.back().second, minus[ic]));
386 #ifdef MCWALKERSET_MPI_DEBUG
387  fout << "rank " << plus[ic] << " sends a walker with " << nsentcopy << " copies to rank " << minus[ic]
388  << std::endl;
389 #endif
390 
391  // update counter and cursor
392  ++nsend;
393 
394  // update copy counter
395  if (ncopy_pairs.back().first > 1)
396  {
397  ncopy_pairs.back().first--;
398  std::sort(ncopy_pairs.begin(), ncopy_pairs.end());
399  }
400  else
401  {
 // walker fully given away: mark dead so it is reaped after the swap
402  good_walkers[ncopy_pairs.back().second]->Multiplicity = 0.0;
403  ncopy_pairs.pop_back();
404  }
405  }
406 
407  if (minus[ic] == rank_num_)
408  {
409  newW.push_back(pop.spawnWalker());
410 
411  // recv the number of copies from the target
412  myComm->comm.receive_n(&nsentcopy, 1, plus[ic]);
413  job_list.push_back(job(newW.size() - 1, plus[ic]));
 // sender and receiver must have collapsed the same run of plan entries
414  if (plus[ic] != plus[ic + nsentcopy] || minus[ic] != minus[ic + nsentcopy])
415  throw std::runtime_error("WalkerControl::swapWalkersSimple send/recv pair checking failed!");
416 #ifdef MCWALKERSET_MPI_DEBUG
417  fout << "rank " << minus[ic] << " recvs a walker with " << nsentcopy << " copies from rank " << plus[ic]
418  << std::endl;
419 #endif
420 
421  ncopy_newW.push_back(nsentcopy);
422  }
423 
424  // update cursor
425  ic += nsentcopy;
426  }
427 
 // a rank either sends or receives in this scheme, never both
428  if (nsend > 0)
429  {
430  std::vector<mpi3::request> requests;
431  // mark all walkers not in send
432  for (auto jobit = job_list.begin(); jobit != job_list.end(); jobit++)
433  good_walkers[jobit->walkerID]->SendInProgress = false;
434  for (auto jobit = job_list.begin(); jobit != job_list.end(); jobit++)
435  {
436  // pack data and send
437  auto& awalker = good_walkers[jobit->walkerID];
438  size_t byteSize = awalker->byteSize();
 // serialize each walker at most once even if sent to several targets
439  if (!awalker->SendInProgress)
440  {
441  awalker->updateBuffer();
442  awalker->SendInProgress = true;
443  }
444  if (use_nonblocking_)
445  requests.push_back(myComm->comm.isend_n(awalker->DataSet.data(), byteSize, jobit->target));
446  else
447  {
448  ScopedTimer local_timer(my_timers_[WC_send]);
449  myComm->comm.send_n(awalker->DataSet.data(), byteSize, jobit->target);
450  }
451  }
452  if (use_nonblocking_)
453  {
454  // wait all the isend
455  for (int im = 0; im < requests.size(); im++)
456  {
457  ScopedTimer local_timer(my_timers_[WC_send]);
458  requests[im].wait();
459  }
460  requests.clear();
461  }
462  }
463  else
464  {
465  std::vector<mpi3::request> requests;
466  for (auto jobit = job_list.begin(); jobit != job_list.end(); jobit++)
467  {
468  // recv and unpack data
469  auto& walker_elements = newW[jobit->walkerID];
470  auto& awalker = walker_elements.walker;
471  size_t byteSize = awalker.byteSize();
472  if (use_nonblocking_)
473  requests.push_back(myComm->comm.ireceive_n(awalker.DataSet.data(), byteSize, jobit->target));
474  else
475  {
476  ScopedTimer local_timer(my_timers_[WC_recv]);
477  myComm->comm.receive_n(awalker.DataSet.data(), byteSize, jobit->target);
478  awalker.copyFromBuffer();
479  }
480  }
481  if (use_nonblocking_)
482  {
 // poll completions so each walker is deserialized as soon as it arrives
483  std::vector<bool> not_completed(requests.size(), true);
484  bool completed = false;
485  while (!completed)
486  {
487  completed = true;
488  for (int im = 0; im < requests.size(); im++)
489  if (not_completed[im])
490  {
491  if (requests[im].completed())
492  {
493  auto& walker_elements = newW[job_list[im].walkerID];
494  walker_elements.walker.copyFromBuffer();
495  not_completed[im] = false;
496  }
497  else
498  completed = false;
499  }
500  }
501  requests.clear();
502  }
503  }
504 
505  //save the number of walkers sent
506  saved_num_walkers_sent_ = nsend;
507 
508  // rebuild Multiplicity
509  for (int iw = 0; iw < ncopy_pairs.size(); iw++)
510  good_walkers[ncopy_pairs[iw].second]->Multiplicity = ncopy_pairs[iw].first;
511 
 // each received walker carries its bundled copy count plus itself
512  for (int iw = 0; iw < newW.size(); iw++)
513  newW[iw].walker.Multiplicity = ncopy_newW[iw] + 1;
514 
515 #ifndef NDEBUG
 // post-swap total must equal this rank's fair share of the population
516  FullPrecRealType TotalMultiplicity = 0;
517  for (int iw = 0; iw < good_walkers.size(); iw++)
518  TotalMultiplicity += good_walkers[iw]->Multiplicity;
519  if (static_cast<int>(TotalMultiplicity) != fair_offset_[rank_num_ + 1] - fair_offset_[rank_num_])
520  throw std::runtime_error("Multiplicity check failed in WalkerControl::swapWalkersSimple!");
521 #endif
522 }
523 #endif
524 
// killDeadWalkersOnRank(MCPopulation& pop): hand every walker whose
// Multiplicity was zeroed out (e.g. after being sent away during load
// balancing) to MCPopulation::killWalker.  Signature is on the preceding line.
526 {
527  // kill walkers, actually put them in deadlist
528  RefVector<MCPWalker> bad_walkers;
529  auto& walkers = pop.get_walkers();
530  bad_walkers.reserve(walkers.size());
 // collect references first; killing while iterating the walker list would
 // invalidate this loop
531  for (auto& walker : walkers)
532  if (static_cast<int>(walker->Multiplicity) == 0)
533  bad_walkers.push_back(*walker);
534  for (MCPWalker& bad_walker : bad_walkers)
535  pop.killWalker(bad_walker);
536 #ifndef NDEBUG
537  pop.checkIntegrity();
538 #endif
539 }
540 
541 std::vector<WalkerControl::IndexType> WalkerControl::syncFutureWalkersPerRank(Communicate* comm, IndexType n_walkers)
542 {
543  int ncontexts = comm->size();
544  std::vector<IndexType> future_walkers(ncontexts, 0);
545  future_walkers[comm->rank()] = n_walkers;
546  comm->allreduce(future_walkers);
547  return future_walkers;
548 }
549 
// Parse walker-control settings from the driver's XML node and echo the
// resulting configuration to the log.  Recognized <parameter> names:
// maxCopy, targetwalkers, max_walkers, use_nonblocking,
// debug_disable_branching.  Always returns true; parse failures abort.
550 bool WalkerControl::put(xmlNodePtr cur)
551 {
552  int nw_target = 0, nw_max = 0;
553  ParameterSet params;
554  params.add(max_copy_, "maxCopy");
555  params.add(nw_target, "targetwalkers");
556  params.add(nw_max, "max_walkers");
557  params.add(use_nonblocking_, "use_nonblocking", {true});
558  params.add(debug_disable_branching_, "debug_disable_branching", {false});
559 
560  try
561  {
 // NOTE(review): `success` is unused; parse errors surface via the exception.
562  bool success = params.put(cur);
563  }
564  catch (const std::runtime_error& re)
565  {
 // abort collectively so no rank hangs waiting on a failed parser
566  myComm->barrier_and_abort("WalkerControl::put parsing error. " + std::string(re.what()));
567  }
568 
 // translate the global target/ceiling into per-rank n_min_/n_max_
569  setMinMax(nw_target, nw_max);
570 
571  app_log() << " WalkerControl parameters " << std::endl;
572  //app_log() << " energyBound = " << targetEnergyBound << std::endl;
573  //app_log() << " sigmaBound = " << targetSigma << std::endl;
574  app_log() << " maxCopy = " << max_copy_ << std::endl;
575  app_log() << " Max Walkers per MPI rank " << n_max_ << std::endl;
576  app_log() << " Min Walkers per MPI rank " << n_min_ << std::endl;
577  app_log() << " Using " << (use_nonblocking_ ? "non-" : "") << "blocking send/recv" << std::endl;
 // presumably guarded by `if (debug_disable_branching_)` on the preceding
 // (elided) line -- confirm against the full source
579  app_log() << " Disable branching for debugging as the user input request." << std::endl;
580  return true;
581 }
582 
583 void WalkerControl::setMinMax(int nw_in, int nmax_in)
584 {
585  if (nw_in > 0)
586  {
587  int npernode = nw_in / num_ranks_;
588  if (use_fixed_pop_)
589  {
590  n_max_ = npernode;
591  n_min_ = npernode;
592  }
593  else
594  {
595  n_max_ = max_copy_ * npernode + 1;
596  n_min_ = npernode / 5 + 1;
597  if (nmax_in > 0)
598  n_max_ = nmax_in;
599  }
600  }
601 }
602 
603 } // namespace qmcplusplus
RandomBase< FullPrecRealType > & rng_
random number generator
IndexType saved_num_walkers_sent_
Number of walkers sent during the exchange.
std::filesystem::path dmcFname
filename for dmc.dat
void set_ensemble_property(const MCDataType< QMCTraits::FullPrecRealType > &ensemble_property)
Definition: MCPopulation.h:189
WalkerElementsRef spawnWalker()
State Requirement:
Base class for any object which needs to know about a MPI communicator.
Definition: MPIObjectBase.h:26
helper functions for EinsplineSetBuilder
Definition: Configuration.h:43
int rank() const
return the rank
Definition: Communicate.h:116
std::vector< int > fair_offset_
offset of the particle index for a fair distribution
std::vector< TimerIDName_t< T > > TimerNameList_t
Definition: TimerManager.h:156
UPtrVector< MCPWalker > & get_walkers()
Definition: MCPopulation.h:194
FullPrecRealType trial_energy_
trial energy
std::ostream & app_log()
Definition: OutputManager.h:65
std::ostream & app_error()
Definition: OutputManager.h:67
ScopeGuard< NewTimer > ScopedTimer
Definition: NewTimer.h:257
void writeDMCdat(int iter, const std::vector< FullPrecRealType > &curData)
take averages and writes to a file
std::vector< std::unique_ptr< T > > UPtrVector
const char walkers[]
Definition: HDFVersion.h:36
bool put(xmlNodePtr cur)
const IndexType rank_num_
context id
bool debug_disable_branching_
disable branching for debugging
int size() const
return the number of tasks
Definition: Communicate.h:118
void setMinMax(int nw_in, int nmax_in)
const IndexType num_ranks_
number of contexts
T min(T a, T b)
bool put(std::istream &is) override
read from std::istream
Definition: ParameterSet.h:42
void branch(int iter, MCPopulation &pop, bool do_not_branch)
unified: perform branch and swap walkers as required
void FairDivideLow(int ntot, int npart, IV &adist)
partition ntot elements among npart
Definition: FairDivide.h:114
for(int i=0;i< size_test;++i) CHECK(Approx(gauss_random_vals[offset_for_rs+i])
IndexType n_min_
minimum number of walkers
bool use_nonblocking_
Use non-blocking isend/irecv.
void checkIntegrity() const
}@
Wrapping information on parallelism.
Definition: Communicate.h:68
void allreduce(T &)
const std::string & getName() const
Definition: Communicate.h:131
class to handle a set of parameters
Definition: ParameterSet.h:27
WalkerProperties::Indexes WP
Definition: ParticleSet.cpp:34
Communicate * myComm
pointer to Communicate
Definition: MPIObjectBase.h:62
void syncWalkersPerRank(Communicate *comm)
IndexType n_max_
maximum number of walkers
static std::vector< IndexType > syncFutureWalkersPerRank(Communicate *comm, IndexType n_walkers)
void set_num_global_walkers(IndexType num_global_walkers)
Definition: MCPopulation.h:183
static void killDeadWalkersOnRank(MCPopulation &pop)
kill dead walkers in the population
std::vector< int > num_per_rank_
number of walkers on each MPI rank after branching before load balancing
std::unique_ptr< T > UPtr
TimerList_t my_timers_
timers
void start()
start a block
Declaration of a TrialWaveFunction.
void add(PDT &aparam, const std::string &aname_in, std::vector< PDT > candidate_values={}, TagStatus status=TagStatus::OPTIONAL)
add a new parameter corresponding to an xmlNode <parameter>
std::vector< std::reference_wrapper< T > > RefVector
TimerNameList_t< WC_Timers > WalkerControlTimerNames
IndexType get_num_global_walkers() const
Definition: MCPopulation.h:168
Indexes
an enum denoting index of physical properties
Walker< QMCTraits, PtclOnLatticeTraits > & walker
TimerManager< NewTimer > & getGlobalTimerManager()
void barrier_and_abort(const std::string &msg) const
~WalkerControl()
empty destructor to clean up the derived classes
IndexType max_copy_
maximum copy per walker
QMCTraits::IndexType IndexType
typedef of IndexType
Definition: WalkerControl.h:50
IndexType get_num_local_walkers() const
Definition: MCPopulation.h:169
WalkerControl(Communicate *c, RandomBase< FullPrecRealType > &rng, bool use_fixed_pop=false)
default constructor
MCDataType< FullPrecRealType > ensemble_property_
ensemble properties
void killWalker(MCPWalker &)
Kill a walker (just barely)
A container class to represent a walker.
Definition: Walker.h:49
void computeCurData(const UPtrVector< MCPWalker > &walkers, std::vector< FullPrecRealType > &curData)
compute curData
QMCTraits::FullPrecRealType FullPrecRealType
typedef of FullPrecRealType
Definition: WalkerControl.h:48
std::unique_ptr< std::ofstream > dmcStream
file to save energy histogram
static void determineNewWalkerPopulation(const std::vector< int > &num_per_rank, std::vector< int > &fair_offset, std::vector< int > &minus, std::vector< int > &plus)
creates the distribution plan
bool use_fixed_pop_
if true, use fixed population
std::vector< FullPrecRealType > curData
any temporary data includes many ridiculous conversions of integral types to and from fp ...