distribute.C · 86 lines (72 loc) · 2.34 KB
/*
Developed by Sandeep Sharma and Garnet K.-L. Chan, 2012
Copyright (c) 2012, Garnet K.-L. Chan
This program is integrated in Molpro with the permission of
Sandeep Sharma and Garnet K.-L. Chan
*/
#include "communicate.h"
#include "distribute.h"
#include "StackOperators.h"
#include "Stackwavefunction.h"
#ifndef SERIAL
#include "mpi.h"
#endif
namespace SpinAdapted{
void SplitStackmem()
{
  // Distribute the memory still unused on the main stack equally among the threads.
  // Thread 0 keeps what it has already used plus one equal share; every other
  // thread gets a contiguous slice starting right after the previous thread's.
  long originalSize = Stackmem[0].size;
  long remainingMem = Stackmem[0].size - Stackmem[0].memused;
  long memPerThrd = remainingMem/numthrds;

  Stackmem[0].size = Stackmem[0].memused + memPerThrd;
  for (int i=1; i<numthrds; i++) {
    Stackmem[i].data = Stackmem[i-1].data + Stackmem[i-1].size;
    Stackmem[i].memused = 0;
    Stackmem[i].size = memPerThrd;
  }
  // The last thread absorbs the remainder of the integer division.
  Stackmem[numthrds-1].size += remainingMem%numthrds;
}
void MergeStackmem()
{
  // Hand all per-thread slices back to thread 0 so the whole block of memory
  // is once again managed as a single stack.
  for (int i=1; i<numthrds; i++) {
    Stackmem[0].size += Stackmem[i].size;
    Stackmem[i].data = 0;
    Stackmem[i].memused = 0;
    Stackmem[i].size = 0;
  }
}
#ifndef SERIAL
#include <boost/mpi/communicator.hpp>
void distributedaccumulate(StackSparseMatrix& component)
{
  // Element-wise sum of the operator data over all MPI ranks, accumulated in
  // place, so every process ends up with the full result.
  dmrginp.datatransfer->start();
  Timer distributetimer;
  boost::mpi::communicator world;
  int size = world.size();
  int rank = world.rank();
  if (size > 1)
  {
    MPI_Allreduce(MPI_IN_PLACE, component.get_data(), component.memoryUsed(), MPI_DOUBLE, MPI_SUM, Calc);
    //MPI::COMM_WORLD.Allreduce(component.get_data(), &tempArray[0], component.memoryUsed(), MPI_DOUBLE, MPI_SUM);
  }
  dmrginp.datatransfer->stop();
}
void distributedaccumulate(DiagonalMatrix& component)
{
  // Same in-place accumulation for a diagonal matrix, one double per column.
  dmrginp.datatransfer->start();
  Timer distributetimer;
  boost::mpi::communicator world;
  int size = world.size();
  int rank = world.rank();
  if (size > 1)
  {
    MPI_Allreduce(MPI_IN_PLACE, component.Store(), component.Ncols(), MPI_DOUBLE, MPI_SUM, Calc);
  }
  dmrginp.datatransfer->stop();
}
#else
// Serial build: there is only one process, so accumulation is a no-op.
void distributedaccumulate(DiagonalMatrix& component) {;}
void distributedaccumulate(SpinAdapted::StackSparseMatrix& component) {;}
#endif
}
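
A minimal usage sketch, not part of distribute.C: the calling pattern below is an assumption based on how the functions above are written, not code from this repository. SplitStackmem() is intended to run just before an OpenMP parallel region so each thread can allocate scratch space from its own slice of Stackmem, and MergeStackmem() afterwards returns every slice to thread 0. The globals Stackmem and numthrds are the ones referenced above; the function name exampleParallelSection is purely illustrative.

#include <omp.h>
#include "distribute.h"

void exampleParallelSection()
{
  SpinAdapted::SplitStackmem();   // carve the free stack memory into per-thread slices
#pragma omp parallel
  {
    // each thread allocates its temporaries from Stackmem[omp_get_thread_num()]
  }
  SpinAdapted::MergeStackmem();   // hand all slices back to thread 0's stack
}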