Commit 7c1fc17
Merge pull request #739 from mcianfrocco/master
Updated code with inclusive terms leader and follower
scheres authored Feb 24, 2021
2 parents da0b41c + 290b43b commit 7c1fc17
Showing 13 changed files with 375 additions and 377 deletions.
12 changes: 6 additions & 6 deletions src/autopicker_mpi.cpp
@@ -25,17 +25,17 @@ void AutoPickerMpi::read(int argc, char **argv)
 	// Define a new MpiNode
 	node = new MpiNode(argc, argv);

-	if (node->isMaster())
+	if (node->isLeader())
 		PRINT_VERSION_INFO();

 	// First read in non-parallelisation-dependent variables
 	AutoPicker::read(argc, argv);

-	// Don't put any output to screen for mpi slaves
-	if (!node->isMaster())
+	// Don't put any output to screen for mpi followers
+	if (!node->isLeader())
 		verb = 0;

-	if (do_write_fom_maps && node->isMaster())
+	if (do_write_fom_maps && node->isLeader())
 		std::cerr << "WARNING : --write_fom_maps is very heavy on disc I/O and is not advised in parallel execution. If possible, using --shrink 0 and lowpass makes I/O less significant." << std::endl;

 	// Possibly also read parallelisation-dependent variables here
@@ -60,9 +60,9 @@ int AutoPickerMpi::deviceInitialise()
 	else
 		dev_id = textToInteger((allThreadIDs[node->rank][0]).c_str());

-	for (int slave = 0; slave < node->size; slave++)
+	for (int follower = 0; follower < node->size; follower++)
 	{
-		if (slave == node->rank)
+		if (follower == node->rank)
 		{
 			std::cout << " + Using GPU device: " << dev_id << " on MPI node: " << node->rank << std::endl;
 			std::cout.flush();
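The two hunks above show the two idioms this rename touches: a leader-only guard (version info and warnings printed once) and a rank-by-rank reporting loop. Below is a minimal, self-contained sketch of both, assuming the usual MPI convention that rank 0 is the leader; the MpiNode struct here is a stand-in, not RELION's actual class, and the barrier that orders the loop's output is an assumption, since the visible hunk is truncated before the loop body ends.

    // Minimal sketch (not RELION's MpiNode): rank 0 is taken to be the leader.
    #include <mpi.h>
    #include <iostream>

    struct MpiNode
    {
        int rank, size;

        MpiNode(int argc, char **argv)
        {
            MPI_Init(&argc, &argv);
            MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            MPI_Comm_size(MPI_COMM_WORLD, &size);
        }

        ~MpiNode() { MPI_Finalize(); }

        bool isLeader() const { return rank == 0; }
    };

    int main(int argc, char **argv)
    {
        MpiNode node(argc, argv);

        if (node.isLeader())
            std::cout << "version info, printed once by the leader" << std::endl;

        // Rank-by-rank reporting, as in deviceInitialise() above; the barrier
        // that serialises the output is an assumption, not shown in the hunk.
        for (int follower = 0; follower < node.size; follower++)
        {
            if (follower == node.rank)
            {
                std::cout << " + rank " << node.rank << " reporting" << std::endl;
                std::cout.flush();
            }
            MPI_Barrier(MPI_COMM_WORLD);
        }
        return 0;
    }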
8 changes: 4 additions & 4 deletions src/ctffind_runner_mpi.cpp
@@ -27,8 +27,8 @@ void CtffindRunnerMpi::read(int argc, char **argv)
 	// First read in non-parallelisation-dependent variables
 	CtffindRunner::read(argc, argv);

-	// Don't put any output to screen for mpi slaves
-	verb = (node->isMaster()) ? 1 : 0;
+	// Don't put any output to screen for mpi followers
+	verb = (node->isLeader()) ? 1 : 0;

 	// Possibly also read parallelisation-dependent variables here
@@ -98,8 +98,8 @@ void CtffindRunnerMpi::run()
 	MPI_Barrier(MPI_COMM_WORLD);

-	// Only the master writes the joined result file
-	if (node->isMaster())
+	// Only the leader writes the joined result file
+	if (node->isLeader())
 	{
 		joinCtffindResults();
 	}
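The run() hunk shows a common MPI output pattern: every rank writes its own partial results, an MPI_Barrier synchronises, and only the leader joins the pieces. A hedged sketch of that pattern follows; the per-rank file names are hypothetical and make no claim to match joinCtffindResults() internals.

    // Sketch of the barrier-then-leader-joins pattern; file names are made up.
    #include <mpi.h>
    #include <fstream>
    #include <string>

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);
        int rank, size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        // Every rank writes its own partial result file.
        {
            std::ofstream part("results_rank" + std::to_string(rank) + ".txt");
            part << "partial results from rank " << rank << "\n";
        }

        // Make sure all partial files exist before the leader reads them.
        MPI_Barrier(MPI_COMM_WORLD);

        // Only the leader (rank 0) writes the joined result file.
        if (rank == 0)
        {
            std::ofstream joined("results_joined.txt");
            for (int r = 0; r < size; r++)
            {
                std::ifstream part("results_rank" + std::to_string(r) + ".txt");
                joined << part.rdbuf();
            }
        }

        MPI_Finalize();
        return 0;
    }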
8 changes: 4 additions & 4 deletions src/jaz/ctf/ctf_refiner_mpi.cpp
@@ -28,8 +28,8 @@ void CtfRefinerMpi::read(int argc, char **argv)
 	// First read in non-parallelisation-dependent variables
 	CtfRefiner::read(argc, argv);

-	// Don't put any output to screen for mpi slaves
-	verb = (node->isMaster()) ? verb : 0;
+	// Don't put any output to screen for mpi followers
+	verb = (node->isLeader()) ? verb : 0;

 	// Possibly also read parallelisation-dependent variables here
 	if (node->size < 2)
@@ -51,15 +51,15 @@ void CtfRefinerMpi::run()
 	// Each node does part of the work
 	long int my_first_micrograph, my_last_micrograph;
 	divide_equally(total_nr_micrographs, node->size, node->rank, my_first_micrograph, my_last_micrograph);

 	if (do_defocus_fit || do_bfac_fit || do_tilt_fit || do_aberr_fit || do_mag_fit)
 	{
 		processSubsetMicrographs(my_first_micrograph, my_last_micrograph);
 	}

 	MPI_Barrier(MPI_COMM_WORLD);

-	if (node->isMaster())
+	if (node->isLeader())
 	{
 		finalise();
 	}
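divide_equally() splits total_nr_micrographs into contiguous per-rank ranges. Its implementation is not part of this diff, so the sketch below only illustrates the arithmetic such a split typically uses: N / size items per rank, with the first N % size ranks taking one extra. RELION's own function may differ in detail.

    // Hedged sketch of an even split of N items over `size` ranks.
    #include <iostream>

    void divide_equally_sketch(long int N, int size, int rank,
                               long int &first, long int &last)
    {
        long int per_rank = N / size;   // everyone gets at least this many
        long int rest     = N % size;   // the first `rest` ranks get one extra

        if (rank < rest)
        {
            first = rank * (per_rank + 1);
            last  = first + per_rank;            // per_rank + 1 items
        }
        else
        {
            first = rest * (per_rank + 1) + (rank - rest) * per_rank;
            last  = first + per_rank - 1;        // per_rank items
        }
    }

    int main()
    {
        // e.g. 10 micrographs over 4 ranks -> [0,2], [3,5], [6,7], [8,9]
        for (int rank = 0; rank < 4; rank++)
        {
            long int first, last;
            divide_equally_sketch(10, 4, rank, first, last);
            std::cout << "rank " << rank << ": [" << first << ", " << last << "]\n";
        }
        return 0;
    }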
14 changes: 7 additions & 7 deletions src/jaz/motion/motion_refiner_mpi.cpp
@@ -28,16 +28,16 @@ void MotionRefinerMpi::read(int argc, char **argv)
 	// First read in non-parallelisation-dependent variables
 	MotionRefiner::read(argc, argv);

-	// Don't put any output to screen for mpi slaves
-	verb = (node->isMaster()) ? verb : 0;
+	// Don't put any output to screen for mpi followers
+	verb = (node->isLeader()) ? verb : 0;

 	// Possibly also read parallelisation-dependent variables here
 	if (node->size < 2)
 	{
 		REPORT_ERROR("ERROR: this program needs to be run with at least two MPI processes!");
 	}

-	if (node->isMaster() && (motionParamEstimator.anythingToDo()))
+	if (node->isLeader() && (motionParamEstimator.anythingToDo()))
 	{
 		REPORT_ERROR("Parameter estimation is not supported in MPI mode.");
 		return;
@@ -80,11 +80,11 @@ void MotionRefinerMpi::run()
 	divide_equally(total_nr_micrographs, node->size, node->rank,
 	               my_first_micrograph, my_last_micrograph);
 	my_nr_micrographs = my_last_micrograph - my_first_micrograph + 1;

 	double k_out_A = reference.pixToAng(reference.k_out);

 	frameRecombiner.init(
 		allMdts,
 		verb, reference.s, fc, k_out_A, reference.angpix,
 		nr_omp_threads, outPath, debug,
 		&reference, &obsModel, &micrographHandler);
@@ -94,7 +94,7 @@ void MotionRefinerMpi::run()
 	MPI_Barrier(MPI_COMM_WORLD);

-	if (generateStar && node->isMaster())
+	if (generateStar && node->isLeader())
 	{
 		combineEPSAndSTARfiles();
 	}
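All four files share the same run() shape: refuse to start without followers, divide the work, process your own subset, synchronise, and let the leader finalise. A compact, hedged skeleton of that flow is below; processOne() and combineAll() are hypothetical stand-ins, and the simple index split is only illustrative.

    // Hedged skeleton of the leader/follower run() shape seen in this commit.
    #include <mpi.h>
    #include <iostream>

    static void processOne(long int i) { (void)i; /* per-micrograph work */ }
    static void combineAll()           { std::cout << "leader combines output\n"; }

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);
        int rank, size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        if (size < 2)
        {
            std::cerr << "ERROR: this program needs at least two MPI processes!\n";
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        const long int total = 100;                 // e.g. total_nr_micrographs
        long int first = rank * total / size;       // contiguous per-rank range
        long int last  = (rank + 1) * total / size - 1;

        for (long int i = first; i <= last; i++)
            processOne(i);                          // each rank handles its subset

        MPI_Barrier(MPI_COMM_WORLD);                // wait for all followers

        if (rank == 0)                              // the leader finalises
            combineAll();

        MPI_Finalize();
        return 0;
    }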