QMCPACK
CommOperatorsMPI.h
1 //////////////////////////////////////////////////////////////////////////////////////
2 // This file is distributed under the University of Illinois/NCSA Open Source License.
3 // See LICENSE file in top directory for details.
4 //
5 // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
6 //
7 // File developed by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
8 // Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
9 // Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
10 // Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
11 //
12 // File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
13 //////////////////////////////////////////////////////////////////////////////////////
14 
15 
16 #ifndef OHMMS_COMMUNICATION_OPERATORS_MPI_H
17 #define OHMMS_COMMUNICATION_OPERATORS_MPI_H
18 #include "Pools/PooledData.h"
19 #include <cstdint>
20 #include <stdexcept>
21 ///dummy declarations to be specialized
22 template<typename T>
23 inline void gsum(T&, int)
24 {
25  throw std::runtime_error("Need specialization for gsum(T&, int)");
26 }
27 
28 template<typename T>
29 inline void Communicate::allreduce(T&)
30 {
31  throw std::runtime_error("Need specialization for allreduce(T&)");
32 }
33 
34 template<typename T>
35 inline void Communicate::reduce(T&)
36 {
37  throw std::runtime_error("Need specialization for reduce(T&)");
38 }
39 
40 template<typename T>
41 inline void Communicate::reduce(T* restrict, T* restrict, int n)
42 {
43  throw std::runtime_error("Need specialization for reduce(T* restrict , T* restrict, int n)");
44 }
45 
46 template<typename T>
47 inline void Communicate::reduce_in_place(T* restrict, int n)
48 {
49  throw std::runtime_error("Need specialization for reduce_in_place(T* restrict, int n)");
50 }
51 
52 template<typename T>
53 inline void Communicate::bcast(T&)
54 {
55  throw std::runtime_error("Need specialization for bcast(T&)");
56 }
57 
58 template<typename T>
59 inline void Communicate::bcast(T* restrict, int n)
60 {
61  throw std::runtime_error("Need specialization for bcast(T* restrict ,int n)");
62 }
63 
64 template<typename T>
65 inline void Communicate::send(int dest, int tag, T&)
66 {
67  throw std::runtime_error("Need specialization for send(int, int, T& )");
68 }
69 
70 template<typename T>
71 inline void Communicate::gather(T& sb, T& rb, int dest)
72 {
73  throw std::runtime_error("Need specialization for gather(T&, T&, int)");
74 }
75 
76 template<typename T>
77 inline void Communicate::allgather(T& sb, T& rb, int count)
78 {
79  throw std::runtime_error("Need specialization for allgather(T&, T&, int)");
80 }
81 
82 template<typename T, typename IT>
83 inline void Communicate::gatherv(T& sb, T& rb, IT&, IT&, int dest)
84 {
85  throw std::runtime_error("Need specialization for gatherv(T&, T&, IT&, IT&, int)");
86 }
87 
88 template<typename T>
89 inline void Communicate::scatter(T& sb, T& rb, int dest)
90 {
91  throw std::runtime_error("Need specialization for scatter(T&, T&, int)");
92 }
93 
94 template<typename T, typename IT>
95 inline void Communicate::scatterv(T& sb, T& rb, IT&, IT&, int source)
96 {
97  throw std::runtime_error("Need specialization for scatterv(T&, T&, IT&, IT&, int)");
98 }
99 
100 template<typename T>
101 inline Communicate::request Communicate::irecv(int source, int tag, T&)
102 {
103  throw std::runtime_error("Need specialization for irecv(int source, int tag, T& )");
104  return MPI_REQUEST_NULL;
105 }
106 
107 template<typename T>
108 inline Communicate::request Communicate::isend(int dest, int tag, T&)
109 {
110  throw std::runtime_error("Need specialization for isend(int dest, int tag, T&)");
111  return MPI_REQUEST_NULL;
112 }
113 
114 template<typename T>
115 inline Communicate::request Communicate::irecv(int source, int tag, T*, int n)
116 {
117  throw std::runtime_error("Need specialization for irecv(int source, int tag, T*, int )");
118  return MPI_REQUEST_NULL;
119 }
120 
121 template<typename T>
122 inline Communicate::request Communicate::isend(int dest, int tag, T*, int n)
123 {
124  throw std::runtime_error("Need specialization for isend(int dest, int tag, T*, int)");
125  return MPI_REQUEST_NULL;
126 }
127 
128 template<typename T>
129 inline void Communicate::allgather(T* sb, T* rb, int count)
130 {
131  throw std::runtime_error("Need specialization for allgather(T*, T*, int)");
132 }
133 
134 template<typename T, typename IT>
135 inline void Communicate::gatherv(T* sb, T* rb, int n, IT&, IT&, int dest)
136 {
137  throw std::runtime_error("Need specialization for gatherv(T*, T*, int, IT&, IT&, int)");
138 }
139 
140 template<typename T>
141 inline void Communicate::gsum(T&)
142 {
143  throw std::runtime_error("Need specialization for Communicate::gsum(T&)");
144 }
145 
146 template<>
147 inline void gsum(int& g, int gid)
148 {
149  int gt = g;
150  MPI_Allreduce(&(gt), &(g), 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
151 }
152 
153 template<unsigned N>
154 inline void gsum(qmcplusplus::TinyVector<double, N>& g, int gid)
155 {
156  //TinyVector<double,N> gt = g;
157  //MPI_Allreduce(gt.begin(), g.begin(), N, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
158  qmcplusplus::TinyVector<double, N> gt(g);
159  MPI_Allreduce(g.begin(), gt.begin(), N, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
160  g = gt;
161 }
162 
163 template<>
164 inline void gsum(std::vector<int>& g, int gid)
165 {
166  std::vector<int> gt(g.size(), 0);
167  MPI_Allreduce(&(g[0]), &(gt[0]), g.size(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
168  g = gt;
169 }
170 
171 template<>
172 inline void gsum(double& g, int gid)
173 {
174  double gt = g;
175  MPI_Allreduce(&(gt), &(g), 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
176 }
177 
178 template<unsigned N>
179 inline void gsum(qmcplusplus::TinyVector<int, N>& g, int gid)
180 {
181  //TinyVector<double,N> gt = g;
182  //MPI_Allreduce(gt.begin(), g.begin(), N, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
183  qmcplusplus::TinyVector<int, N> gt(g);
184  MPI_Allreduce(g.begin(), gt.begin(), N, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
185  g = gt;
186 }
187 
188 template<>
189 inline void gsum(std::vector<double>& g, int gid)
190 {
191  std::vector<double> gt(g.size(), 0.0);
192  MPI_Allreduce(&(g[0]), &(gt[0]), g.size(), MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
193  g = gt;
194 }
195 
196 template<>
197 inline void gsum(qmcplusplus::Matrix<double>& g, int gid)
198 {
199  //TinyVector<double,N> gt = g;
200  //MPI_Allreduce(gt.begin(), g.begin(), N, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
201  std::vector<double> gt(g.size());
202  std::copy(g.begin(), g.end(), gt.begin());
203  MPI_Allreduce(g.data(), &gt[0], g.size(), MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
204  std::copy(gt.begin(), gt.end(), g.data());
205 }
206 
207 template<>
208 inline void Communicate::allreduce(int& g)
209 {
210  if (d_ncontexts == 1)
211  return;
212  int gt = g;
213  MPI_Allreduce(&(gt), &(g), 1, MPI_INT, MPI_SUM, myMPI);
214 }
215 
216 template<>
217 inline void Communicate::allreduce(long& g)
218 {
219  if (d_ncontexts == 1)
220  return;
221  long gt = g;
222  MPI_Allreduce(&(gt), &(g), 1, MPI_LONG, MPI_SUM, myMPI);
223 }
224 
225 template<>
226 inline void Communicate::allreduce(unsigned long& g)
227 {
228  if (d_ncontexts == 1)
229  return;
230  unsigned long gt = g;
231  MPI_Allreduce(&(gt), &(g), 1, MPI_UNSIGNED_LONG, MPI_SUM, myMPI);
232 }
233 
234 template<>
235 inline void Communicate::allreduce(float& g)
236 {
237  if (d_ncontexts == 1)
238  return;
239  float gt = g;
240  MPI_Allreduce(&(gt), &(g), 1, MPI_FLOAT, MPI_SUM, myMPI);
241 }
242 
243 template<>
244 inline void Communicate::allreduce(double& g)
245 {
246  if (d_ncontexts == 1)
247  return;
248  double gt = g;
249  MPI_Allreduce(&(gt), &(g), 1, MPI_DOUBLE, MPI_SUM, myMPI);
250 }
251 
252 template<>
253 inline void Communicate::allreduce(qmcplusplus::TinyVector<float, OHMMS_DIM>& g)
254 {
255  if (d_ncontexts == 1)
256  return;
257  qmcplusplus::TinyVector<float, OHMMS_DIM> gt(g);
258  MPI_Allreduce(g.begin(), gt.begin(), OHMMS_DIM, MPI_FLOAT, MPI_SUM, myMPI);
259  g = gt;
260 }
261 
262 template<>
263 inline void Communicate::allreduce(qmcplusplus::TinyVector<double, OHMMS_DIM>& g)
264 {
265  if (d_ncontexts == 1)
266  return;
267  qmcplusplus::TinyVector<double, OHMMS_DIM> gt(g);
268  MPI_Allreduce(g.begin(), gt.begin(), OHMMS_DIM, MPI_DOUBLE, MPI_SUM, myMPI);
269  g = gt;
270 }
271 
272 template<>
273 inline void Communicate::allreduce(qmcplusplus::TinyVector<int, OHMMS_DIM>& g)
274 {
275  if (d_ncontexts == 1)
276  return;
277  qmcplusplus::TinyVector<int, OHMMS_DIM> gt(g);
278  MPI_Allreduce(g.begin(), gt.begin(), OHMMS_DIM, MPI_INT, MPI_SUM, myMPI);
279  g = gt;
280 }
281 
282 template<>
283 inline void Communicate::allreduce(std::vector<int>& g)
284 {
285  if (d_ncontexts == 1)
286  return;
287  std::vector<int> gt(g.size(), 0);
288  MPI_Allreduce(&(g[0]), &(gt[0]), g.size(), MPI_INT, MPI_SUM, myMPI);
289  g = gt;
290 }
291 
292 template<>
293 inline void Communicate::allreduce(std::vector<long>& g)
294 {
295  if (d_ncontexts == 1)
296  return;
297  std::vector<long> gt(g.size(), 0);
298  MPI_Allreduce(&(g[0]), &(gt[0]), g.size(), MPI_LONG, MPI_SUM, myMPI);
299  g = gt;
300 }
301 
302 template<>
303 inline void Communicate::allreduce(std::vector<unsigned long>& g)
304 {
305  if (d_ncontexts == 1)
306  return;
307  std::vector<unsigned long> gt(g.size(), 0);
308  MPI_Allreduce(&(g[0]), &(gt[0]), g.size(), MPI_UNSIGNED_LONG, MPI_SUM, myMPI);
309  g = gt;
310 }
311 
312 template<>
313 inline void Communicate::allreduce(std::vector<float>& g)
314 {
315  std::vector<float> gt(g.size(), 0.0f);
316  MPI_Allreduce(&(g[0]), &(gt[0]), g.size(), MPI_FLOAT, MPI_SUM, myMPI);
317  g = gt;
318 }
319 
320 template<>
321 inline void Communicate::allreduce(std::vector<double>& g)
322 {
323  std::vector<double> gt(g.size(), 0.0);
324  MPI_Allreduce(&(g[0]), &(gt[0]), g.size(), MPI_DOUBLE, MPI_SUM, myMPI);
325  g = gt;
326 }
327 
328 template<>
329 inline void Communicate::allreduce(std::vector<std::complex<float>>& g)
330 {
331  std::vector<std::complex<float>> gt(g.size(), std::complex<float>(0.0));
332  MPI_Allreduce(&(g[0]), &(gt[0]), 2 * g.size(), MPI_FLOAT, MPI_SUM, myMPI);
333  g = gt;
334 }
335 
336 template<>
337 inline void Communicate::allreduce(std::vector<std::complex<double>>& g)
338 {
339  std::vector<std::complex<double>> gt(g.size(), std::complex<double>(0.0));
340  MPI_Allreduce(&(g[0]), &(gt[0]), 2 * g.size(), MPI_DOUBLE, MPI_SUM, myMPI);
341  g = gt;
342 }
343 
344 template<>
345 inline void Communicate::allreduce(PooledData<float>& g)
346 {
347  PooledData<float> gt(g.size());
348  MPI_Allreduce(g.data(), gt.data(), g.size(), MPI_FLOAT, MPI_SUM, myMPI);
349  g = gt;
350 }
351 
352 template<>
353 inline void Communicate::allreduce(PooledData<double>& g)
354 {
355  PooledData<double> gt(g.size());
356  MPI_Allreduce(&(g[0]), &(gt[0]), g.size(), MPI_DOUBLE, MPI_SUM, myMPI);
357  g = gt;
358 }
359 
360 template<>
361 inline void Communicate::allreduce(qmcplusplus::Matrix<float>& g)
362 {
363  std::vector<float> gt(g.size());
364  std::copy(g.begin(), g.end(), gt.begin());
365  MPI_Allreduce(g.data(), &gt[0], g.size(), MPI_FLOAT, MPI_SUM, myMPI);
366  std::copy(gt.begin(), gt.end(), g.data());
367 }
368 
369 template<>
370 inline void Communicate::allreduce(qmcplusplus::Matrix<double>& g)
371 {
372  std::vector<double> gt(g.size());
373  std::copy(g.begin(), g.end(), gt.begin());
374  MPI_Allreduce(g.data(), &gt[0], g.size(), MPI_DOUBLE, MPI_SUM, myMPI);
375  std::copy(gt.begin(), gt.end(), g.data());
376 }
377 
378 template<>
379 inline void Communicate::reduce(std::vector<float>& g)
380 {
381  std::vector<float> gt(g.size(), 0.0f);
382  MPI_Reduce(&(g[0]), &(gt[0]), g.size(), MPI_FLOAT, MPI_SUM, 0, myMPI);
383  if (!d_mycontext)
384  g = gt;
385 }
386 
387 template<>
388 inline void Communicate::reduce(std::vector<double>& g)
389 {
390  std::vector<double> gt(g.size(), 0.0);
391  MPI_Reduce(&(g[0]), &(gt[0]), g.size(), MPI_DOUBLE, MPI_SUM, 0, myMPI);
392  if (!d_mycontext)
393  g = gt;
394 }
395 
396 template<>
397 inline void Communicate::reduce(std::vector<int>& g)
398 {
399  std::vector<int> gt(g.size(), 0);
400  MPI_Reduce(&(g[0]), &(gt[0]), g.size(), MPI_INT, MPI_SUM, 0, myMPI);
401  if (!d_mycontext)
402  g = gt;
403 }
404 
405 template<>
406 inline void Communicate::reduce(std::vector<long>& g)
407 {
408  std::vector<long> gt(g.size(), 0);
409  MPI_Reduce(&(g[0]), &(gt[0]), g.size(), MPI_LONG, MPI_SUM, 0, myMPI);
410  if (!d_mycontext)
411  g = gt;
412 }
413 
414 template<>
415 inline void Communicate::reduce(int* restrict g, int* restrict res, int n)
416 {
417  MPI_Reduce(g, res, n, MPI_INT, MPI_SUM, 0, myMPI);
418 }
419 
420 template<>
421 inline void Communicate::reduce(double* restrict g, double* restrict res, int n)
422 {
423  MPI_Reduce(g, res, n, MPI_DOUBLE, MPI_SUM, 0, myMPI);
424 }
425 
426 template<>
427 inline void Communicate::reduce_in_place(double* restrict res, int n)
428 {
429  if (!d_mycontext)
430  MPI_Reduce(MPI_IN_PLACE, res, n, MPI_DOUBLE, MPI_SUM, 0, myMPI);
431  else
432  MPI_Reduce(res, NULL, n, MPI_DOUBLE, MPI_SUM, 0, myMPI);
433 }
434 
435 template<>
436 inline void Communicate::reduce_in_place(float* restrict res, int n)
437 {
438  if (!d_mycontext)
439  MPI_Reduce(MPI_IN_PLACE, res, n, MPI_FLOAT, MPI_SUM, 0, myMPI);
440  else
441  MPI_Reduce(res, NULL, n, MPI_FLOAT, MPI_SUM, 0, myMPI);
442 }
443 
444 template<>
445 inline void Communicate::bcast(int& g)
446 {
447  MPI_Bcast(&g, 1, MPI_INT, 0, myMPI);
448 }
449 
450 template<>
451 inline void Communicate::bcast(uint32_t& g)
452 {
453  MPI_Bcast(&g, 1, MPI_UNSIGNED, 0, myMPI);
454 }
455 
456 template<>
457 inline void Communicate::bcast(std::vector<uint32_t>& g)
458 {
459  MPI_Bcast(&(g[0]), g.size(), MPI_UNSIGNED, 0, myMPI);
460 }
461 
462 
463 template<>
464 inline void Communicate::bcast(double& g)
465 {
466  MPI_Bcast(&g, 1, MPI_DOUBLE, 0, myMPI);
467 }
468 
469 template<>
470 inline void Communicate::bcast(float& g)
471 {
472  MPI_Bcast(&g, 1, MPI_FLOAT, 0, myMPI);
473 }
474 
475 template<>
476 inline void Communicate::bcast(bool& g)
477 {
478  int val = g ? 1 : 0;
479  MPI_Bcast(&val, 1, MPI_INT, 0, myMPI);
480  g = val != 0;
481 }
482 
483 template<>
484 inline void Communicate::bcast(qmcplusplus::TinyVector<double, 2>& g)
485 {
486  MPI_Bcast(g.begin(), 2, MPI_DOUBLE, 0, myMPI);
487 }
488 
489 template<>
490 inline void Communicate::bcast(qmcplusplus::TinyVector<int, 2>& g)
491 {
492  MPI_Bcast(g.begin(), 2, MPI_INT, 0, myMPI);
493 }
494 
495 template<>
496 inline void Communicate::bcast(qmcplusplus::TinyVector<int, 3>& g)
497 {
498  MPI_Bcast(g.begin(), 3, MPI_INT, 0, myMPI);
499 }
500 
501 template<>
502 inline void Communicate::bcast(std::vector<qmcplusplus::TinyVector<int, 3>>& g)
503 {
504  MPI_Bcast(&g[0][0], 3 * g.size(), MPI_INT, 0, myMPI);
505 }
506 
507 template<>
508 inline void Communicate::bcast(qmcplusplus::TinyVector<double, 3>& g)
509 {
510  MPI_Bcast(g.begin(), 3, MPI_DOUBLE, 0, myMPI);
511 }
512 
513 template<>
514 inline void Communicate::bcast(qmcplusplus::TinyVector<float, 3>& g)
515 {
516  MPI_Bcast(g.begin(), 3, MPI_FLOAT, 0, myMPI);
517 }
518 
519 template<>
520 inline void Communicate::bcast(qmcplusplus::TinyVector<double, 4>& g)
521 {
522  MPI_Bcast(g.begin(), 4, MPI_DOUBLE, 0, myMPI);
523 }
524 
525 template<>
526 inline void Communicate::bcast(qmcplusplus::Tensor<double, 3>& g)
527 {
528  MPI_Bcast(&(g[0]), 9, MPI_DOUBLE, 0, myMPI);
529 }
530 
531 template<>
532 inline void Communicate::bcast(qmcplusplus::Tensor<float, 3>& g)
533 {
534  MPI_Bcast(&(g[0]), 9, MPI_FLOAT, 0, myMPI);
535 }
536 
537 template<>
538 inline void Communicate::bcast(qmcplusplus::Vector<double>& g)
539 {
540  MPI_Bcast(&(g[0]), g.size(), MPI_DOUBLE, 0, myMPI);
541 }
542 
543 template<>
544 inline void Communicate::bcast(qmcplusplus::Vector<float>& g)
545 {
546  MPI_Bcast(&(g[0]), g.size(), MPI_FLOAT, 0, myMPI);
547 }
548 
549 template<>
550 inline void Communicate::bcast(qmcplusplus::Vector<std::complex<double>>& g)
551 {
552  MPI_Bcast(&(g[0]), 2 * g.size(), MPI_DOUBLE, 0, myMPI);
553 }
554 
555 template<>
556 inline void Communicate::bcast(qmcplusplus::Vector<std::complex<float>>& g)
557 {
558  MPI_Bcast(&(g[0]), 2 * g.size(), MPI_FLOAT, 0, myMPI);
559 }
560 
561 template<>
562 inline void Communicate::bcast(qmcplusplus::Vector<int>& g)
563 {
564  MPI_Bcast(&(g[0]), g.size(), MPI_INT, 0, myMPI);
565 }
566 
567 
568 template<>
569 inline void Communicate::bcast(qmcplusplus::Vector<qmcplusplus::TinyVector<double, 2>>& g)
570 {
571  MPI_Bcast(&(g[0]), 2 * g.size(), MPI_DOUBLE, 0, myMPI);
572 }
573 
574 template<>
575 inline void Communicate::bcast(qmcplusplus::Vector<qmcplusplus::TinyVector<double, 3>>& g)
576 {
577  MPI_Bcast(&(g[0]), 3 * g.size(), MPI_DOUBLE, 0, myMPI);
578 }
579 
580 template<>
581 inline void Communicate::bcast(qmcplusplus::Vector<qmcplusplus::TinyVector<float, 3>>& g)
582 {
583  MPI_Bcast(&(g[0]), 3 * g.size(), MPI_FLOAT, 0, myMPI);
584 }
585 
586 template<>
587 inline void Communicate::bcast(qmcplusplus::Matrix<double>& g)
588 {
589  MPI_Bcast(g.data(), g.size(), MPI_DOUBLE, 0, myMPI);
590 }
591 
592 template<>
593 inline void Communicate::bcast(qmcplusplus::Matrix<float>& g)
594 {
595  MPI_Bcast(g.data(), g.size(), MPI_FLOAT, 0, myMPI);
596 }
597 
598 template<>
599 inline void Communicate::bcast(qmcplusplus::Matrix<int>& g)
600 {
601  MPI_Bcast(g.data(), g.size(), MPI_INT, 0, myMPI);
602 }
603 
604 
605 template<>
606 inline void Communicate::bcast(Array<std::complex<double>, 1>& g)
607 {
608  MPI_Bcast(g.data(), 2 * g.size(), MPI_DOUBLE, 0, myMPI);
609 }
610 
611 
612 template<>
613 inline void Communicate::bcast(Array<std::complex<double>, 2>& g)
614 {
615  MPI_Bcast(g.data(), 2 * g.size(), MPI_DOUBLE, 0, myMPI);
616 }
617 
618 template<>
619 inline void Communicate::bcast(Array<std::complex<double>, 3>& g)
620 {
621  MPI_Bcast(g.data(), 2 * g.size(), MPI_DOUBLE, 0, myMPI);
622 }
623 
624 template<>
625 inline void Communicate::bcast(Array<std::complex<float>, 3>& g)
626 {
627  MPI_Bcast(g.data(), 2 * g.size(), MPI_FLOAT, 0, myMPI);
628 }
629 
630 
631 template<>
632 inline void Communicate::bcast(std::vector<double>& g)
633 {
634  MPI_Bcast(&(g[0]), g.size(), MPI_DOUBLE, 0, myMPI);
635 }
636 
637 template<>
638 inline void Communicate::bcast(std::vector<std::complex<double>>& g)
639 {
640  MPI_Bcast(&(g[0]), 2 * g.size(), MPI_DOUBLE, 0, myMPI);
641 }
642 
643 template<>
644 inline void Communicate::bcast(std::vector<std::complex<float>>& g)
645 {
646  MPI_Bcast(&(g[0]), 2 * g.size(), MPI_FLOAT, 0, myMPI);
647 }
648 
649 template<>
650 inline void Communicate::bcast(std::vector<float>& g)
651 {
652  MPI_Bcast(&(g[0]), g.size(), MPI_FLOAT, 0, myMPI);
653 }
654 
655 template<>
656 inline void Communicate::bcast(Array<double, 3>& g)
657 {
658  MPI_Bcast(g.data(), g.size(), MPI_DOUBLE, 0, myMPI);
659 }
660 
661 template<>
662 inline void Communicate::bcast(Array<float, 3>& g)
663 {
664  MPI_Bcast(g.data(), g.size(), MPI_FLOAT, 0, myMPI);
665 }
666 
667 template<>
668 inline void Communicate::bcast(Array<int, 3>& g)
669 {
670  MPI_Bcast(g.data(), g.size(), MPI_INT, 0, myMPI);
671 }
672 
673 template<>
674 inline void Communicate::bcast(std::vector<qmcplusplus::TinyVector<double, 2>>& g)
675 {
676  MPI_Bcast(&(g[0][0]), 2 * g.size(), MPI_DOUBLE, 0, myMPI);
677 }
678 
679 template<>
680 inline void Communicate::bcast(std::vector<qmcplusplus::TinyVector<double, 3>>& g)
681 {
682  MPI_Bcast(&(g[0][0]), 3 * g.size(), MPI_DOUBLE, 0, myMPI);
683 }
684 
685 template<>
686 inline void Communicate::bcast(std::vector<qmcplusplus::TinyVector<float, 3>>& g)
687 {
688  MPI_Bcast(&(g[0][0]), 3 * g.size(), MPI_FLOAT, 0, myMPI);
689 }
690 
691 template<>
692 inline void Communicate::bcast(std::vector<int>& g)
693 {
694  MPI_Bcast(&(g[0]), g.size(), MPI_INT, 0, myMPI);
695 }
696 
697 template<>
698 inline void Communicate::bcast(std::vector<bool>& g)
699 {
700  std::vector<int> intVec(g.size());
701  for (int i = 0; i < g.size(); i++)
702  intVec[i] = g[i] ? 1 : 0;
703  MPI_Bcast(&(intVec[0]), g.size(), MPI_INT, 0, myMPI);
704  for (int i = 0; i < g.size(); i++)
705  g[i] = intVec[i] != 0;
706 }
707 
708 template<>
709 inline void Communicate::bcast(double* restrict x, int n)
710 {
711  MPI_Bcast(x, n, MPI_DOUBLE, 0, myMPI);
712 }
713 
714 template<>
715 inline void Communicate::bcast(std::complex<double>* restrict x, int n)
716 {
717  MPI_Bcast(x, 2*n, MPI_DOUBLE, 0, myMPI);
718 }
719 
720 template<>
721 inline void Communicate::bcast(float* restrict x, int n)
722 {
723  MPI_Bcast(x, n, MPI_FLOAT, 0, myMPI);
724 }
725 
726 template<>
727 inline void Communicate::bcast(std::complex<float>* restrict x, int n)
728 {
729  MPI_Bcast(x, 2*n, MPI_FLOAT, 0, myMPI);
730 }
731 
732 template<>
733 inline void Communicate::bcast(int* restrict x, int n)
734 {
735  MPI_Bcast(x, n, MPI_INT, 0, myMPI);
736 }
737 
738 template<>
739 inline void Communicate::bcast(char* restrict x, int n)
740 {
741  MPI_Bcast(x, n, MPI_CHAR, 0, myMPI);
742 }
743 
744 template<>
745 inline void Communicate::bcast(std::string& g)
746 {
747  int string_size = g.size();
748 
749  bcast(string_size);
750  if (rank() != 0)
751  g.resize(string_size);
752 
753  bcast(&g[0], g.size());
754 }
755 
756 template<>
757 inline void Communicate::send(int dest, int tag, std::vector<double>& g)
758 {
759  MPI_Send(&(g[0]), g.size(), MPI_DOUBLE, dest, tag, myMPI);
760 }
761 
762 template<>
763 inline Communicate::request Communicate::isend(int dest, int tag, std::vector<double>& g)
764 {
765  request r;
766  MPI_Isend(&(g[0]), g.size(), MPI_DOUBLE, dest, tag, myMPI, &r);
767  return r;
768 }
769 
770 template<>
771 inline Communicate::request Communicate::irecv(int source, int tag, std::vector<double>& g)
772 {
773  request r;
774  MPI_Irecv(&(g[0]), g.size(), MPI_DOUBLE, source, tag, myMPI, &r);
775  return r;
776 }
777 
778 template<>
779 inline void Communicate::gatherv(std::vector<char>& l,
780  std::vector<char>& g,
781  std::vector<int>& counts,
782  std::vector<int>& displ,
783  int dest)
784 {
785  int ierr = MPI_Gatherv(&l[0], l.size(), MPI_CHAR, &g[0], &counts[0], &displ[0], MPI_CHAR, dest, myMPI);
786 }
787 
788 
789 template<>
790 inline void Communicate::gatherv(std::vector<double>& l,
791  std::vector<double>& g,
792  std::vector<int>& counts,
793  std::vector<int>& displ,
794  int dest)
795 {
796  int ierr = MPI_Gatherv(&l[0], l.size(), MPI_DOUBLE, &g[0], &counts[0], &displ[0], MPI_DOUBLE, dest, myMPI);
797 }
798 
799 template<>
800 inline void Communicate::gatherv(std::vector<float>& l,
801  std::vector<float>& g,
802  std::vector<int>& counts,
803  std::vector<int>& displ,
804  int dest)
805 {
806  int ierr = MPI_Gatherv(&l[0], l.size(), MPI_FLOAT, &g[0], &counts[0], &displ[0], MPI_FLOAT, dest, myMPI);
807 }
808 
809 template<>
810 inline void Communicate::gatherv(std::vector<int>& l,
811  std::vector<int>& g,
812  std::vector<int>& counts,
813  std::vector<int>& displ,
814  int dest)
815 {
816  int ierr = MPI_Gatherv(&l[0], l.size(), MPI_INT, &g[0], &counts[0], &displ[0], MPI_INT, dest, myMPI);
817 }
818 
819 template<>
820 inline void Communicate::allgather(std::vector<char>& sb, std::vector<char>& rb, int count)
821 {
822  MPI_Allgather(&sb[0], count, MPI_CHAR, &rb[0], count, MPI_CHAR, myMPI);
823 }
824 
825 template<>
826 inline void Communicate::allgather(std::vector<int>& sb, std::vector<int>& rb, int count)
827 {
828  MPI_Allgather(&sb[0], count, MPI_INT, &rb[0], count, MPI_INT, myMPI);
829 }
830 
831 
832 template<>
833 inline void Communicate::allgatherv(std::vector<int>& l,
834  std::vector<int>& g,
835  std::vector<int>& counts,
836  std::vector<int>& displ)
837 {
838  int ierr = MPI_Allgatherv(&l[0], l.size(), MPI_INT, &g[0], &counts[0], &displ[0], MPI_INT, myMPI);
839 }
840 
841 template<>
842 inline void Communicate::gatherv(std::vector<long>& l,
843  std::vector<long>& g,
844  std::vector<int>& counts,
845  std::vector<int>& displ,
846  int dest)
847 {
848  int ierr = MPI_Gatherv(&l[0], l.size(), MPI_LONG, &g[0], &counts[0], &displ[0], MPI_LONG, dest, myMPI);
849 }
850 
851 template<>
852 inline void Communicate::gather(std::vector<double>& l, std::vector<double>& g, int dest)
853 {
854  int ierr = MPI_Gather(&l[0], l.size(), MPI_DOUBLE, &g[0], l.size(), MPI_DOUBLE, dest, myMPI);
855 }
856 
857 template<>
858 inline void Communicate::gather(std::vector<char>& l, std::vector<char>& g, int dest)
859 {
860  int ierr = MPI_Gather(&l[0], l.size(), MPI_CHAR, &g[0], l.size(), MPI_CHAR, dest, myMPI);
861 }
862 
863 template<>
864 inline void Communicate::gather(std::vector<int>& l, std::vector<int>& g, int dest)
865 {
866  int ierr = MPI_Gather(&l[0], l.size(), MPI_INT, &g[0], l.size(), MPI_INT, dest, myMPI);
867 }
868 
869 template<>
870 inline void Communicate::gatherv(PooledData<double>& l,
871  PooledData<double>& g,
872  std::vector<int>& counts,
873  std::vector<int>& displ,
874  int dest)
875 {
876  int ierr = MPI_Gatherv(l.data(), l.size(), MPI_DOUBLE, g.data(), &counts[0], &displ[0], MPI_DOUBLE, dest, myMPI);
877 }
878 
879 template<>
880 inline void Communicate::gather(PooledData<double>& l, PooledData<double>& g, int dest)
881 {
882  int ierr = MPI_Gather(l.data(), l.size(), MPI_DOUBLE, g.data(), l.size(), MPI_DOUBLE, dest, myMPI);
883 }
884 
885 template<>
886 inline void Communicate::gsum(std::vector<int>& g)
887 {
888  std::vector<int> gt(g.size(), 0);
889  MPI_Allreduce(&(g[0]), &(gt[0]), g.size(), MPI_INT, MPI_SUM, myMPI);
890  g = gt;
891 }
892 
893 template<>
894 inline void Communicate::gsum(std::vector<double>& g)
895 {
896  std::vector<double> gt(g.size(), 0.0);
897  MPI_Allreduce(&(g[0]), &(gt[0]), g.size(), MPI_DOUBLE, MPI_SUM, myMPI);
898  g = gt;
899 }
900 
901 template<>
902 inline void gsum(std::vector<std::complex<double>>& g, int gid)
903 {
904  std::vector<std::complex<double>> gt(g.size(), 0.0);
905  MPI_Allreduce(&(g[0]), &(gt[0]), 2 * g.size(), MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
906  g = gt;
907 }
908 
909 template<>
910 inline void Communicate::gsum(std::vector<std::complex<double>>& g)
911 {
912  std::vector<std::complex<double>> gt(g.size(), 0.0);
913  MPI_Allreduce(&(g[0]), &(gt[0]), 2 * g.size(), MPI_DOUBLE, MPI_SUM, myMPI);
914  g = gt;
915 }
916 
917 template<>
918 inline void Communicate::gatherv(char* l, char* g, int n, std::vector<int>& counts, std::vector<int>& displ, int dest)
919 {
920  int ierr = MPI_Gatherv(l, n, MPI_CHAR, g, &counts[0], &displ[0], MPI_CHAR, dest, myMPI);
921 }
922 
923 template<>
924 inline void Communicate::allgather(char* sb, char* rb, int count)
925 {
926  MPI_Allgather(sb, count, MPI_CHAR, rb, count, MPI_CHAR, myMPI);
927 }
928 
929 template<>
930 inline void Communicate::scatterv(std::vector<char>& sb,
931  std::vector<char>& rb,
932  std::vector<int>& counts,
933  std::vector<int>& displ,
934  int source)
935 {
936  int ierr = MPI_Scatterv(&sb[0], &counts[0], &displ[0], MPI_CHAR, &rb[0], rb.size(), MPI_CHAR, source, myMPI);
937 }
938 
939 template<typename T, typename TMPI, typename IT>
940 inline void Communicate::gatherv_in_place(T* buf, TMPI& datatype, IT& counts, IT& displ, int dest)
941 {
942  if (!d_mycontext)
943  MPI_Gatherv(MPI_IN_PLACE, 0, datatype, buf, counts.data(), displ.data(), datatype, dest, myMPI);
944  else
945  MPI_Gatherv(buf + displ[d_mycontext], counts[d_mycontext], datatype, NULL, counts.data(), displ.data(), datatype,
946  dest, myMPI);
947 }
948 
949 template<>
950 inline void Communicate::allreduce(qmcplusplus::Matrix<std::complex<double>>& g)
951 {
952  std::vector<std::complex<double>> gt(g.size());
953  std::copy(g.begin(), g.end(), gt.begin());
954  MPI_Allreduce(g.data(), &gt[0], 2 * g.size(), MPI_DOUBLE, MPI_SUM, myMPI);
955  std::copy(gt.begin(), gt.end(), g.data());
956 }
957 
958 template<>
959 inline void Communicate::allreduce(qmcplusplus::Matrix<std::complex<float>>& g)
960 {
961  std::vector<std::complex<float>> gt(g.size());
962  std::copy(g.begin(), g.end(), gt.begin());
963  MPI_Allreduce(g.data(), &gt[0], 2 * g.size(), MPI_FLOAT, MPI_SUM, myMPI);
964  std::copy(gt.begin(), gt.end(), g.data());
965 }
966 
967 template<>
968 inline void Communicate::bcast(std::complex<double>& g)
969 {
970  MPI_Bcast(&g, 2, MPI_DOUBLE, 0, myMPI);
971 }
972 
973 template<>
974 inline void Communicate::bcast(std::complex<float>& g)
975 {
976  MPI_Bcast(&g, 2, MPI_FLOAT, 0, myMPI);
977 }
978 
979 #endif
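
Usage note (not part of the header listing): these specializations are invoked through a Communicate object rather than by calling MPI directly; a type with no matching specialization falls through to the generic templates at the top of the file and throws std::runtime_error at run time. The sketch below is a minimal illustration of that calling pattern, assuming a Communicate instance supplied by the application; the function name, the file name, and the include path are illustrative, not QMCPACK driver code.

#include <string>
#include <vector>
#include "Message/Communicate.h" // assumed include path for the Communicate class (see Communicate.h)

// Hypothetical helper: broadcast run parameters from rank 0, then sum partial results.
void broadcast_and_reduce(Communicate& comm)
{
  std::string input_name;
  if (comm.rank() == 0)
    input_name = "qmc.in.xml"; // illustrative value, only meaningful on rank 0
  comm.bcast(input_name); // std::string specialization: broadcasts the size, then the characters

  std::vector<double> params(8, 0.0);
  comm.bcast(params); // std::vector<double> specialization

  std::vector<double> local_sums(8, 1.0);
  comm.allreduce(local_sums); // element-wise MPI_SUM; result available on every rank
  comm.reduce(local_sums);    // same sum, but the result is valid only on rank 0
}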