{
if (PAR(&(checkpointBroadcast.cr_)))
{
- block_bc(&(checkpointBroadcast.cr_), densityFittingState_.stepsSinceLastCalculation_);
- block_bc(&(checkpointBroadcast.cr_), densityFittingState_.adaptiveForceConstantScale_);
- block_bc(&(checkpointBroadcast.cr_), densityFittingState_.exponentialMovingAverageState_);
+ block_bc(checkpointBroadcast.cr_.mpi_comm_mygroup,
+ densityFittingState_.stepsSinceLastCalculation_);
+ block_bc(checkpointBroadcast.cr_.mpi_comm_mygroup,
+ densityFittingState_.adaptiveForceConstantScale_);
+ block_bc(checkpointBroadcast.cr_.mpi_comm_mygroup,
+ densityFittingState_.exponentialMovingAverageState_);
}
}
}
if (PAR(commRecord_))
{
- gmx_bcast(sizeof(potentialOffset_), &potentialOffset_, commRecord_);
+ gmx_bcast(sizeof(potentialOffset_), &potentialOffset_, commRecord_->mpi_comm_mygroup);
}
for (size_t k = 0; k < biasCoupledToSystem_.size(); k++)
}
if (commRecord->nnodes > 1)
{
- gmx_bcast(arrayRef.size() * sizeof(T), arrayRef.data(), commRecord);
+ gmx_bcast(arrayRef.size() * sizeof(T), arrayRef.data(), commRecord->mpi_comm_mygroup);
}
}
void BiasState::broadcast(const t_commrec* commRecord)
{
- gmx_bcast(sizeof(coordState_), &coordState_, commRecord);
+ gmx_bcast(sizeof(coordState_), &coordState_, commRecord->mpi_comm_mygroup);
- gmx_bcast(points_.size() * sizeof(PointState), points_.data(), commRecord);
+ gmx_bcast(points_.size() * sizeof(PointState), points_.data(), commRecord->mpi_comm_mygroup);
- gmx_bcast(weightSumCovering_.size() * sizeof(double), weightSumCovering_.data(), commRecord);
+ gmx_bcast(weightSumCovering_.size() * sizeof(double), weightSumCovering_.data(),
+ commRecord->mpi_comm_mygroup);
- gmx_bcast(sizeof(histogramSize_), &histogramSize_, commRecord);
+ gmx_bcast(sizeof(histogramSize_), &histogramSize_, commRecord->mpi_comm_mygroup);
}
void BiasState::setFreeEnergyToConvolvedPmf(const std::vector<DimParams>& dimParams, const BiasGrid& grid)
nullptr, ddbox);
}
- gmx_bcast(sizeof(gmx_ddbox_t), ddbox, &cr);
+ gmx_bcast(sizeof(gmx_ddbox_t), ddbox, cr.mpi_comm_mygroup);
}
dd_bonded_cg_distance(mdlog, &mtop, &ir, as_rvec_array(xGlobal.data()), box,
options.checkBondedInteractions, &r_2b, &r_mb);
}
- gmx_bcast(sizeof(r_2b), &r_2b, cr);
- gmx_bcast(sizeof(r_mb), &r_mb, cr);
+ gmx_bcast(sizeof(r_2b), &r_2b, cr->mpi_comm_mygroup);
+ gmx_bcast(sizeof(r_mb), &r_mb, cr->mpi_comm_mygroup);
/* We use an initial margin of 10% for the minimum cell size,
* except when we are just below the non-bonded cut-off.
}
/* Communicate the information set by the master to all ranks */
- gmx_bcast(sizeof(numDomains), numDomains, cr);
+ gmx_bcast(sizeof(numDomains), numDomains, cr->mpi_comm_mygroup);
if (EEL_PME(ir.coulombtype))
{
- gmx_bcast(sizeof(numPmeOnlyRanks), &numPmeOnlyRanks, cr);
+ gmx_bcast(sizeof(numPmeOnlyRanks), &numPmeOnlyRanks, cr->mpi_comm_mygroup);
}
DDGridSetup ddGridSetup;
/* Broadcasts the structure data */
static void bc_ed_positions(const t_commrec* cr, struct gmx_edx* s, EssentialDynamicsStructure stype)
{
- snew_bc(cr, s->anrs, s->nr); /* Index numbers */
- snew_bc(cr, s->x, s->nr); /* Positions */
- nblock_bc(cr, s->nr, s->anrs);
- nblock_bc(cr, s->nr, s->x);
+ snew_bc(MASTER(cr), s->anrs, s->nr); /* Index numbers */
+ snew_bc(MASTER(cr), s->x, s->nr); /* Positions */
+ nblock_bc(cr->mpi_comm_mygroup, s->nr, s->anrs);
+ nblock_bc(cr->mpi_comm_mygroup, s->nr, s->x);
/* For the average & reference structures we need an array for the collective indices,
* and we need to broadcast the masses as well */
snew(s->c_ind, s->nr); /* Collective indices */
/* Local atom indices get assigned in dd_make_local_group_indices.
* There, also memory is allocated */
- s->nalloc_loc = 0; /* allocation size of s->anrs_loc */
- snew_bc(cr, s->x_old, s->nr); /* To be able to always make the ED molecule whole, ... */
- nblock_bc(cr, s->nr, s->x_old); /* ... keep track of shift changes with the help of old coords */
+ s->nalloc_loc = 0; /* allocation size of s->anrs_loc */
+ snew_bc(MASTER(cr), s->x_old, s->nr); /* To be able to always make the ED molecule whole, ... */
+ nblock_bc(cr->mpi_comm_mygroup, s->nr,
+ s->x_old); /* ... keep track of shift changes with the help of old coords */
}
/* broadcast masses for the reference structure (for mass-weighted fitting) */
if (stype == EssentialDynamicsStructure::Reference)
{
- snew_bc(cr, s->m, s->nr);
- nblock_bc(cr, s->nr, s->m);
+ snew_bc(MASTER(cr), s->m, s->nr);
+ nblock_bc(cr->mpi_comm_mygroup, s->nr, s->m);
}
/* For the average structure we might need the masses for mass-weighting */
if (stype == EssentialDynamicsStructure::Average)
{
- snew_bc(cr, s->sqrtm, s->nr);
- nblock_bc(cr, s->nr, s->sqrtm);
- snew_bc(cr, s->m, s->nr);
- nblock_bc(cr, s->nr, s->m);
+ snew_bc(MASTER(cr), s->sqrtm, s->nr);
+ nblock_bc(cr->mpi_comm_mygroup, s->nr, s->sqrtm);
+ snew_bc(MASTER(cr), s->m, s->nr);
+ nblock_bc(cr->mpi_comm_mygroup, s->nr, s->m);
}
}
{
int i;
- snew_bc(cr, ev->ieig, ev->neig); /* index numbers of eigenvector */
- snew_bc(cr, ev->stpsz, ev->neig); /* stepsizes per eigenvector */
- snew_bc(cr, ev->xproj, ev->neig); /* instantaneous x projection */
- snew_bc(cr, ev->fproj, ev->neig); /* instantaneous f projection */
- snew_bc(cr, ev->refproj, ev->neig); /* starting or target projection */
+ snew_bc(MASTER(cr), ev->ieig, ev->neig); /* index numbers of eigenvector */
+ snew_bc(MASTER(cr), ev->stpsz, ev->neig); /* stepsizes per eigenvector */
+ snew_bc(MASTER(cr), ev->xproj, ev->neig); /* instantaneous x projection */
+ snew_bc(MASTER(cr), ev->fproj, ev->neig); /* instantaneous f projection */
+ snew_bc(MASTER(cr), ev->refproj, ev->neig); /* starting or target projection */
- nblock_bc(cr, ev->neig, ev->ieig);
- nblock_bc(cr, ev->neig, ev->stpsz);
- nblock_bc(cr, ev->neig, ev->xproj);
- nblock_bc(cr, ev->neig, ev->fproj);
- nblock_bc(cr, ev->neig, ev->refproj);
+ nblock_bc(cr->mpi_comm_mygroup, ev->neig, ev->ieig);
+ nblock_bc(cr->mpi_comm_mygroup, ev->neig, ev->stpsz);
+ nblock_bc(cr->mpi_comm_mygroup, ev->neig, ev->xproj);
+ nblock_bc(cr->mpi_comm_mygroup, ev->neig, ev->fproj);
+ nblock_bc(cr->mpi_comm_mygroup, ev->neig, ev->refproj);
- snew_bc(cr, ev->vec, ev->neig); /* Eigenvector components */
+ snew_bc(MASTER(cr), ev->vec, ev->neig); /* Eigenvector components */
for (i = 0; i < ev->neig; i++)
{
- snew_bc(cr, ev->vec[i], length);
- nblock_bc(cr, length, ev->vec[i]);
+ snew_bc(MASTER(cr), ev->vec[i], length);
+ nblock_bc(cr->mpi_comm_mygroup, length, ev->vec[i]);
}
}
static void broadcast_ed_data(const t_commrec* cr, gmx_edsam* ed)
{
/* Master lets the other nodes know if it's ED only or also flooding */
- gmx_bcast(sizeof(ed->eEDtype), &(ed->eEDtype), cr);
+ gmx_bcast(sizeof(ed->eEDtype), &(ed->eEDtype), cr->mpi_comm_mygroup);
int numedis = ed->edpar.size();
/* First let everybody know how many ED data sets to expect */
- gmx_bcast(sizeof(numedis), &numedis, cr);
- nblock_abc(cr, numedis, &(ed->edpar));
+ gmx_bcast(sizeof(numedis), &numedis, cr->mpi_comm_mygroup);
+ nblock_abc(MASTER(cr), cr->mpi_comm_mygroup, numedis, &(ed->edpar));
/* Now transfer the ED data set(s) */
for (auto& edi : ed->edpar)
{
/* Broadcast a single ED data set */
- block_bc(cr, edi);
+ block_bc(cr->mpi_comm_mygroup, edi);
/* Broadcast positions */
bc_ed_positions(cr, &(edi.sref),
/* For harmonic restraints the reference projections can change with time */
if (edi.flood.bHarmonic)
{
- snew_bc(cr, edi.flood.initialReferenceProjection, edi.flood.vecs.neig);
- snew_bc(cr, edi.flood.referenceProjectionSlope, edi.flood.vecs.neig);
- nblock_bc(cr, edi.flood.vecs.neig, edi.flood.initialReferenceProjection);
- nblock_bc(cr, edi.flood.vecs.neig, edi.flood.referenceProjectionSlope);
+ snew_bc(MASTER(cr), edi.flood.initialReferenceProjection, edi.flood.vecs.neig);
+ snew_bc(MASTER(cr), edi.flood.referenceProjectionSlope, edi.flood.vecs.neig);
+ nblock_bc(cr->mpi_comm_mygroup, edi.flood.vecs.neig, edi.flood.initialReferenceProjection);
+ nblock_bc(cr->mpi_comm_mygroup, edi.flood.vecs.neig, edi.flood.referenceProjectionSlope);
}
}
}
for (auto edi = ed->edpar.begin(); edi != ed->edpar.end(); ++edi)
{
/* Allocate space for ED buffer variables */
- snew_bc(cr, edi->buf, 1); /* MASTER has already allocated edi->buf in init_edi() */
+ snew_bc(MASTER(cr), edi->buf, 1); /* MASTER has already allocated edi->buf in init_edi() */
snew(edi->buf->do_edsam, 1);
/* Space for collective ED buffer variables */
}
if (PAR(cr))
{
- gmx_bcast(sizeof(headerContents.step), &headerContents.step, cr);
+ gmx_bcast(sizeof(headerContents.step), &headerContents.step, cr->mpi_comm_mygroup);
gmx::MdModulesCheckpointReadingBroadcast broadcastCheckPointData = { *cr, headerContents.file_version };
mdModulesNotifier.checkpointingNotifications_.notify(broadcastCheckPointData);
}
#endif
}
-void gmx_barrier(const t_commrec gmx_unused* cr)
+void gmx_barrier(MPI_Comm gmx_unused communicator)
{
#if !GMX_MPI
gmx_call("gmx_barrier");
#else
- MPI_Barrier(cr->mpi_comm_mygroup);
+ MPI_Barrier(communicator);
#endif
}
-void gmx_bcast(int gmx_unused nbytes, void gmx_unused* b, const t_commrec gmx_unused* cr)
+void gmx_bcast(int gmx_unused nbytes, void gmx_unused* b, MPI_Comm gmx_unused communicator)
{
#if !GMX_MPI
- gmx_call("gmx_bast");
+ gmx_call("gmx_bcast");
#else
- MPI_Bcast(b, nbytes, MPI_BYTE, MASTERRANK(cr), cr->mpi_comm_mygroup);
-#endif
-}
-
-void gmx_bcast_sim(int gmx_unused nbytes, void gmx_unused* b, const t_commrec gmx_unused* cr)
-{
-#if !GMX_MPI
- gmx_call("gmx_bast");
-#else
- MPI_Bcast(b, nbytes, MPI_BYTE, MASTERRANK(cr), cr->mpi_comm_mysim);
+ MPI_Bcast(b, nbytes, MPI_BYTE, 0, communicator);
#endif
}
void gmx_setup_nodecomm(FILE* fplog, struct t_commrec* cr);
/* Sets up fast global communication for clusters with multi-core nodes */
-void gmx_barrier(const struct t_commrec* cr);
-/* Wait till all processes in cr->mpi_comm_mygroup have reached the barrier */
+//! Wait until all processes in communicator have reached the barrier
+void gmx_barrier(MPI_Comm communicator);
-void gmx_bcast(int nbytes, void* b, const struct t_commrec* cr);
-/* Broadcast nbytes bytes from the master to cr->mpi_comm_mygroup */
-
-void gmx_bcast_sim(int nbytes, void* b, const struct t_commrec* cr);
-/* Broadcast nbytes bytes from the sim master to cr->mpi_comm_mysim */
+//! Broadcast nbytes bytes from rank 0 (the master) to all other ranks of communicator
+void gmx_bcast(int nbytes, void* b, MPI_Comm communicator);
void gmx_sumi(int nr, int r[], const struct t_commrec* cr);
/* Calculate the global sum of an array of ints */
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdrunoptions.h"
+#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
/* Notify the other nodes whether we are still connected. */
if (PAR(cr))
{
- block_bc(cr, bConnected);
+ block_bc(cr->mpi_comm_mygroup, bConnected);
}
/* ...if not connected, the job is done here. */
/* Let the other nodes know whether we got a new IMD synchronization frequency. */
if (PAR(cr))
{
- block_bc(cr, nstimd_new);
+ block_bc(cr->mpi_comm_mygroup, nstimd_new);
}
/* Now we all set the (new) nstimd communication time step */
/* make new_forces known to the clients */
if (PAR(cr))
{
- block_bc(cr, new_nforces);
+ block_bc(cr->mpi_comm_mygroup, new_nforces);
}
/* When new_natoms < 0 then we know that these are still the same forces
/* In parallel mode we communicate the to-be-applied forces to the other nodes */
if (PAR(cr))
{
- nblock_bc(cr, nforces, f_ind);
- nblock_bc(cr, nforces, f);
+ nblock_bc(cr->mpi_comm_mygroup, nforces, f_ind);
+ nblock_bc(cr->mpi_comm_mygroup, nforces, f);
}
/* done communicating the forces, reset bNewForces */
/* Communicate initial coordinates xa_old to all processes */
if (PAR(cr))
{
- gmx_bcast(nat * sizeof(xa_old[0]), xa_old, cr);
+ gmx_bcast(nat * sizeof(xa_old[0]), xa_old, cr->mpi_comm_mygroup);
}
}
/* Let the other nodes know whether we want IMD */
if (PAR(cr))
{
- block_bc(cr, createSession);
+ block_bc(cr->mpi_comm_mygroup, createSession);
}
/*... if not we are done.*/
/* do we allow interactive pulling? If so let the other nodes know. */
if (PAR(cr))
{
- block_bc(cr, impl->bForceActivated);
+ block_bc(cr->mpi_comm_mygroup, impl->bForceActivated);
}
/* setup the listening socket on master process */
{
check_multi_int(fplog, ms, dd->nsystems, "the number of systems per ensemble", FALSE);
}
- gmx_bcast_sim(sizeof(int), &dd->nsystems, cr);
+ gmx_bcast(sizeof(int), &dd->nsystems, cr->mpi_comm_mysim);
/* We used to allow any value of nsystems which was a divisor
* of ms->nsim. But this required an extra communicator which
if (DOMAINDECOMP(cr))
{
- gmx_bcast(2 * dd->nres, dd->Rt_6, cr);
+ gmx_bcast(2 * dd->nres, dd->Rt_6, cr->mpi_comm_mygroup);
}
}
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
}
if (PAR(cr))
{
- gmx_bcast(sizeof(box), box, cr);
+ gmx_bcast(sizeof(box), box, cr->mpi_comm_mygroup);
}
return std::make_unique<BoxDeformation>(inputrec.delta_t, inputrec.init_step, inputrec.deform, box);
#include "broadcaststructs.h"
#include "gromacs/fileio/tpxio.h"
-#include "gromacs/gmxlib/network.h"
-#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/state.h"
template<typename AllocatorType>
-static void bcastPaddedRVecVector(const t_commrec* cr, gmx::PaddedVector<gmx::RVec, AllocatorType>* v, int numAtoms)
+static void bcastPaddedRVecVector(MPI_Comm communicator,
+ gmx::PaddedVector<gmx::RVec, AllocatorType>* v,
+ int numAtoms)
{
v->resizeWithPadding(numAtoms);
- nblock_bc(cr, makeArrayRef(*v));
+ nblock_bc(communicator, makeArrayRef(*v));
}
-void broadcastStateWithoutDynamics(const t_commrec* cr, t_state* state)
+void broadcastStateWithoutDynamics(MPI_Comm communicator,
+ bool useDomainDecomposition,
+ bool isParallelRun,
+ t_state* state)
{
- GMX_RELEASE_ASSERT(!DOMAINDECOMP(cr),
+ GMX_RELEASE_ASSERT(!useDomainDecomposition,
"broadcastStateWithoutDynamics should only be used for special cases "
"without domain decomposition");
- if (!PAR(cr))
+ if (!isParallelRun)
{
return;
}
/* Broadcasts the state sizes and flags from the master to all ranks
* in the given communicator.
*/
- block_bc(cr, state->natoms);
- block_bc(cr, state->flags);
+ block_bc(communicator, state->natoms);
+ block_bc(communicator, state->flags);
for (int i = 0; i < estNR; i++)
{
{
switch (i)
{
- case estLAMBDA: nblock_bc(cr, efptNR, state->lambda.data()); break;
- case estFEPSTATE: block_bc(cr, state->fep_state); break;
- case estBOX: block_bc(cr, state->box); break;
- case estX: bcastPaddedRVecVector(cr, &state->x, state->natoms); break;
+ case estLAMBDA: nblock_bc(communicator, efptNR, state->lambda.data()); break;
+ case estFEPSTATE: block_bc(communicator, state->fep_state); break;
+ case estBOX: block_bc(communicator, state->box); break;
+ case estX: bcastPaddedRVecVector(communicator, &state->x, state->natoms); break;
default:
GMX_RELEASE_ASSERT(false,
"The state has a dynamic entry, while no dynamic entries "
}
}
-static void bc_tpxheader(const t_commrec* cr, TpxFileHeader* tpx)
+static void bc_tpxheader(MPI_Comm communicator, TpxFileHeader* tpx)
{
- block_bc(cr, tpx->bIr);
- block_bc(cr, tpx->bBox);
- block_bc(cr, tpx->bTop);
- block_bc(cr, tpx->bX);
- block_bc(cr, tpx->bV);
- block_bc(cr, tpx->bF);
- block_bc(cr, tpx->natoms);
- block_bc(cr, tpx->ngtc);
- block_bc(cr, tpx->lambda);
- block_bc(cr, tpx->fep_state);
- block_bc(cr, tpx->sizeOfTprBody);
- block_bc(cr, tpx->fileVersion);
- block_bc(cr, tpx->fileGeneration);
- block_bc(cr, tpx->isDouble);
+ block_bc(communicator, tpx->bIr);
+ block_bc(communicator, tpx->bBox);
+ block_bc(communicator, tpx->bTop);
+ block_bc(communicator, tpx->bX);
+ block_bc(communicator, tpx->bV);
+ block_bc(communicator, tpx->bF);
+ block_bc(communicator, tpx->natoms);
+ block_bc(communicator, tpx->ngtc);
+ block_bc(communicator, tpx->lambda);
+ block_bc(communicator, tpx->fep_state);
+ block_bc(communicator, tpx->sizeOfTprBody);
+ block_bc(communicator, tpx->fileVersion);
+ block_bc(communicator, tpx->fileGeneration);
+ block_bc(communicator, tpx->isDouble);
}
-static void bc_tprCharBuffer(const t_commrec* cr, std::vector<char>* charBuffer)
+static void bc_tprCharBuffer(MPI_Comm communicator, bool isMasterRank, std::vector<char>* charBuffer)
{
int elements = charBuffer->size();
- block_bc(cr, elements);
+ block_bc(communicator, elements);
- nblock_abc(cr, elements, charBuffer);
+ nblock_abc(isMasterRank, communicator, elements, charBuffer);
}
-void init_parallel(t_commrec* cr, t_inputrec* inputrec, gmx_mtop_t* mtop, PartialDeserializedTprFile* partialDeserializedTpr)
+void init_parallel(MPI_Comm communicator,
+ bool isMasterRank,
+ t_inputrec* inputrec,
+ gmx_mtop_t* mtop,
+ PartialDeserializedTprFile* partialDeserializedTpr)
{
- bc_tpxheader(cr, &partialDeserializedTpr->header);
- bc_tprCharBuffer(cr, &partialDeserializedTpr->body);
- if (!MASTER(cr))
+ bc_tpxheader(communicator, &partialDeserializedTpr->header);
+ bc_tprCharBuffer(communicator, isMasterRank, &partialDeserializedTpr->body);
+ if (!isMasterRank)
{
completeTprDeserialization(partialDeserializedTpr, inputrec, mtop);
}
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2016,2018,2019, by the GROMACS development team, led by
+ * Copyright (c) 2016,2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#include <vector>
#include "gromacs/gmxlib/network.h"
-#include "gromacs/mdtypes/commrec.h"
#include "gromacs/utility/arrayref.h"
-#include "gromacs/utility/smalloc.h"
struct gmx_mtop_t;
-struct t_commrec;
struct t_inputrec;
struct PartialDeserializedTprFile;
class t_state;
-//! Convenience wrapper for gmx_bcast of a single value.
+//! Convenience wrapper for gmx_bcast to communicator of a single value.
template<typename T>
-void block_bc(const t_commrec* cr, T& data)
+void block_bc(MPI_Comm communicator, T& data)
{
- gmx_bcast(sizeof(T), static_cast<void*>(&data), cr);
+ gmx_bcast(sizeof(T), static_cast<void*>(&data), communicator);
}
-//! Convenience wrapper for gmx_bcast of a C-style array.
+//! Convenience wrapper for gmx_bcast to communicator of a C-style array.
template<typename T>
-void nblock_bc(const t_commrec* cr, int numElements, T* data)
+void nblock_bc(MPI_Comm communicator, int numElements, T* data)
{
- gmx_bcast(numElements * sizeof(T), static_cast<void*>(data), cr);
+ gmx_bcast(numElements * sizeof(T), static_cast<void*>(data), communicator);
}
-//! Convenience wrapper for gmx_bcast of an ArrayRef<T>
+//! Convenience wrapper for gmx_bcast to communicator of an ArrayRef<T>
template<typename T>
-void nblock_bc(const t_commrec* cr, gmx::ArrayRef<T> data)
+void nblock_bc(MPI_Comm communicator, gmx::ArrayRef<T> data)
{
- gmx_bcast(data.size() * sizeof(T), static_cast<void*>(data.data()), cr);
+ gmx_bcast(data.size() * sizeof(T), static_cast<void*>(data.data()), communicator);
}
//! Convenience wrapper for allocation with snew of C-style arrays that need allocation on non-master ranks.
template<typename T>
-void snew_bc(const t_commrec* cr, T*& data, int numElements)
+void snew_bc(bool isMasterRank, T*& data, int numElements)
{
- if (!MASTER(cr))
+ if (!isMasterRank)
{
snew(data, numElements);
}
}
//! Convenience wrapper for gmx_bcast of a C-style array which needs allocation on non-master ranks.
template<typename T>
-void nblock_abc(const t_commrec* cr, int numElements, T** v)
+void nblock_abc(bool isMasterRank, MPI_Comm communicator, int numElements, T** v)
{
- snew_bc(cr, v, numElements);
- nblock_bc(cr, numElements, *v);
+ snew_bc(isMasterRank, v, numElements);
+ nblock_bc(communicator, numElements, *v);
}
//! Convenience wrapper for gmx_bcast of a std::vector which needs resizing on non-master ranks.
template<typename T>
-void nblock_abc(const t_commrec* cr, int numElements, std::vector<T>* v)
+void nblock_abc(bool isMasterRank, MPI_Comm communicator, int numElements, std::vector<T>* v)
{
- if (!MASTER(cr))
+ if (!isMasterRank)
{
v->resize(numElements);
}
- gmx_bcast(numElements * sizeof(T), v->data(), cr);
+ gmx_bcast(numElements * sizeof(T), v->data(), communicator);
}
//! \brief Broadcasts the non-dynamic state from the master to all ranks in the given communicator
//
// This is intended to be used with MPI parallelization without
// domain decompostion (currently with NM and TPI).
-void broadcastStateWithoutDynamics(const t_commrec* cr, t_state* state);
+void broadcastStateWithoutDynamics(MPI_Comm communicator,
+ bool useDomainDecomposition,
+ bool isParallelRun,
+ t_state* state);
//! \brief Broadcast inputrec and mtop and allocate node-specific settings
-void init_parallel(t_commrec* cr,
+void init_parallel(MPI_Comm communicator,
+ bool isMasterRank,
t_inputrec* inputrec,
gmx_mtop_t* mtop,
PartialDeserializedTprFile* partialDeserializedTpr);
}
xp = fr->x;
vp = fr->v;
- gmx_bcast(sizeof(*fr), fr, cr);
+ gmx_bcast(sizeof(*fr), fr, cr->mpi_comm_mygroup);
fr->x = xp;
fr->v = vp;
gmx_sum_sim(eglsNR, mpiBuffer_.data(), ms_);
}
// Communicate the signals from the master to the others.
- gmx_bcast(eglsNR * sizeof(mpiBuffer_[0]), mpiBuffer_.data(), cr_);
+ gmx_bcast(eglsNR * sizeof(mpiBuffer_[0]), mpiBuffer_.data(), cr_->mpi_comm_mygroup);
}
void SimulationSignaller::setSignals()
if (PAR(cr))
{
- gmx_bcast(sizeof(n), &n, cr);
+ gmx_bcast(sizeof(n), &n, cr->mpi_comm_mygroup);
for (i = 0; i < n; i++)
{
- gmx_bcast(DIM * DIM * sizeof(ekind->tcstat[i].ekinh[0][0]), ekind->tcstat[i].ekinh[0], cr);
- gmx_bcast(DIM * DIM * sizeof(ekind->tcstat[i].ekinf[0][0]), ekind->tcstat[i].ekinf[0], cr);
+ gmx_bcast(DIM * DIM * sizeof(ekind->tcstat[i].ekinh[0][0]), ekind->tcstat[i].ekinh[0],
+ cr->mpi_comm_mygroup);
+ gmx_bcast(DIM * DIM * sizeof(ekind->tcstat[i].ekinf[0][0]), ekind->tcstat[i].ekinf[0],
+ cr->mpi_comm_mygroup);
gmx_bcast(DIM * DIM * sizeof(ekind->tcstat[i].ekinh_old[0][0]),
- ekind->tcstat[i].ekinh_old[0], cr);
-
- gmx_bcast(sizeof(ekind->tcstat[i].ekinscalef_nhc), &(ekind->tcstat[i].ekinscalef_nhc), cr);
- gmx_bcast(sizeof(ekind->tcstat[i].ekinscaleh_nhc), &(ekind->tcstat[i].ekinscaleh_nhc), cr);
- gmx_bcast(sizeof(ekind->tcstat[i].vscale_nhc), &(ekind->tcstat[i].vscale_nhc), cr);
+ ekind->tcstat[i].ekinh_old[0], cr->mpi_comm_mygroup);
+
+ gmx_bcast(sizeof(ekind->tcstat[i].ekinscalef_nhc), &(ekind->tcstat[i].ekinscalef_nhc),
+ cr->mpi_comm_mygroup);
+ gmx_bcast(sizeof(ekind->tcstat[i].ekinscaleh_nhc), &(ekind->tcstat[i].ekinscaleh_nhc),
+ cr->mpi_comm_mygroup);
+ gmx_bcast(sizeof(ekind->tcstat[i].vscale_nhc), &(ekind->tcstat[i].vscale_nhc),
+ cr->mpi_comm_mygroup);
}
- gmx_bcast(DIM * DIM * sizeof(ekind->ekin[0][0]), ekind->ekin[0], cr);
+ gmx_bcast(DIM * DIM * sizeof(ekind->ekin[0][0]), ekind->ekin[0], cr->mpi_comm_mygroup);
- gmx_bcast(sizeof(ekind->dekindl), &ekind->dekindl, cr);
- gmx_bcast(sizeof(ekind->cosacc.mvcos), &ekind->cosacc.mvcos, cr);
+ gmx_bcast(sizeof(ekind->dekindl), &ekind->dekindl, cr->mpi_comm_mygroup);
+ gmx_bcast(sizeof(ekind->cosacc.mvcos), &ekind->cosacc.mvcos, cr->mpi_comm_mygroup);
}
}
bool hasReadEkinState = MASTER(cr) ? state_global->ekinstate.hasReadEkinState : false;
if (PAR(cr))
{
- gmx_bcast(sizeof(hasReadEkinState), &hasReadEkinState, cr);
+ gmx_bcast(sizeof(hasReadEkinState), &hasReadEkinState, cr->mpi_comm_mygroup);
}
if (hasReadEkinState)
{
"The initial step is not consistent across multi simulations which "
"share the state");
}
- gmx_barrier(cr);
+ gmx_barrier(cr->mpi_comm_mygroup);
}
else
{
imdSession, pull_work, state, &f, mdAtoms, &top, fr, vsite, constr,
nrnb, nullptr, FALSE);
shouldCheckNumberOfBondedInteractions = true;
- gmx_bcast(sizeof(ir->nsteps), &ir->nsteps, cr);
+ gmx_bcast(sizeof(ir->nsteps), &ir->nsteps, cr->mpi_comm_mygroup);
}
else
{
{
inputrec = &inputrecInstance;
}
- init_parallel(cr, inputrec, &mtop, partialDeserializedTpr.get());
+ init_parallel(cr->mpi_comm_mygroup, MASTER(cr), inputrec, &mtop, partialDeserializedTpr.get());
}
GMX_RELEASE_ASSERT(inputrec != nullptr, "All ranks should have a valid inputrec now");
partialDeserializedTpr.reset(nullptr);
{
globalState = std::make_unique<t_state>();
}
- broadcastStateWithoutDynamics(cr, globalState.get());
+ broadcastStateWithoutDynamics(cr->mpi_comm_mygroup, DOMAINDECOMP(cr), PAR(cr), globalState.get());
}
/* A parallel command line option consistency check that we can
if (PAR(cr))
{
- gmx_bcast(sizeof(box), box, cr);
+ gmx_bcast(sizeof(box), box, cr->mpi_comm_mygroup);
}
if (inputrec->cutoff_scheme != ecutsVERLET)
}
}
- // Produce the task assignment for this rank.
+ // Produce the task assignment for this rank - done after DD is constructed
GpuTaskAssignmentsBuilder gpuTaskAssignmentsBuilder;
GpuTaskAssignments gpuTaskAssignments = gpuTaskAssignmentsBuilder.build(
gpuIdsToUse, userGpuTaskAssignment, *hwinfo, communicator, physicalNodeComm,
/* Master synchronizes its value of reset_counters with all nodes
* including PME only nodes */
int64_t reset_counters = wcycle_get_reset_counters(wcycle);
- gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr);
+ gmx_bcast(sizeof(reset_counters), &reset_counters, cr->mpi_comm_mysim);
wcycle_set_reset_counters(wcycle, reset_counters);
}
if (cr->npmenodes > 0)
{
/* The PME only nodes need to know nChargePerturbed(FEP on Q) and nTypePerturbed(FEP on LJ)*/
- gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
- gmx_bcast_sim(sizeof(nTypePerturbed), &nTypePerturbed, cr);
+ gmx_bcast(sizeof(nChargePerturbed), &nChargePerturbed, cr->mpi_comm_mysim);
+ gmx_bcast(sizeof(nTypePerturbed), &nTypePerturbed, cr->mpi_comm_mysim);
}
if (thisRankHasDuty(cr, DUTY_PME))
bool hasReadEkinState = MASTER(cr) ? state_global->ekinstate.hasReadEkinState : false;
if (PAR(cr))
{
- gmx_bcast(sizeof(hasReadEkinState), &hasReadEkinState, cr);
+ gmx_bcast(sizeof(hasReadEkinState), &hasReadEkinState, cr->mpi_comm_mygroup);
}
if (hasReadEkinState)
{
"date prev. COM "
"to bcast here as well as to e.g. checkpointing");
- gmx_bcast(sizeof(dvec), group.x_prev_step, cr);
+ gmx_bcast(sizeof(dvec), group.x_prev_step, cr->mpi_comm_mygroup);
}
}
}
if (PAR(cr))
{
/* Only the master rank has the checkpointed COM from the previous step */
- gmx_bcast(sizeof(double) * state->pull_com_prev_step.size(), &state->pull_com_prev_step[0], cr);
+ gmx_bcast(sizeof(double) * state->pull_com_prev_step.size(),
+ &state->pull_com_prev_step[0], cr->mpi_comm_mygroup);
}
setPrevStepPullComFromState(pull_work, state);
}
#if GMX_MPI
if (PAR(cr))
{
- gmx_bcast(sizeof(erg->xc_center), erg->xc_center, cr);
+ gmx_bcast(sizeof(erg->xc_center), erg->xc_center, cr->mpi_comm_mygroup);
}
#endif
}
#if GMX_MPI
if (PAR(cr))
{
- gmx_bcast(erg->rotg->nat * sizeof(erg->xc_old[0]), erg->xc_old, cr);
+ gmx_bcast(erg->rotg->nat * sizeof(erg->xc_old[0]), erg->xc_old, cr->mpi_comm_mygroup);
}
#endif
}
{
// Note: this assumes that all ranks are hitting this line, which is not generally true.
// TODO: find the right subcommunicator; what is really wanted here is a _scoped_ communicator.
- gmx_barrier(&cr);
+ gmx_barrier(cr.mpi_comm_mygroup);
}
// Apply restraint on all thread ranks only after any updates have been made.
for (ic = 0; ic < eCompNR; ic++)
{
- gmx_bcast(sizeof(g->comp[ic].nMolReq), &(g->comp[ic].nMolReq), cr);
- gmx_bcast(sizeof(g->comp[ic].nMol), &(g->comp[ic].nMol), cr);
- gmx_bcast(swap->nAverage * sizeof(g->comp[ic].nMolPast[0]), g->comp[ic].nMolPast, cr);
+ gmx_bcast(sizeof(g->comp[ic].nMolReq), &(g->comp[ic].nMolReq), cr->mpi_comm_mygroup);
+ gmx_bcast(sizeof(g->comp[ic].nMol), &(g->comp[ic].nMol), cr->mpi_comm_mygroup);
+ gmx_bcast(swap->nAverage * sizeof(g->comp[ic].nMolPast[0]), g->comp[ic].nMolPast,
+ cr->mpi_comm_mygroup);
}
}
}
for (int ig = eGrpSplit0; ig <= eGrpSplit1; ig++)
{
g = &(s->group[ig]);
- gmx_bcast((g->atomset.numAtomsGlobal()) * sizeof((g->xc_old)[0]), g->xc_old, (cr));
+ gmx_bcast((g->atomset.numAtomsGlobal()) * sizeof((g->xc_old)[0]), g->xc_old,
+ cr->mpi_comm_mygroup);
}
}
/* Broadcast the random number array to the other nodes */
if (PAR(cr))
{
- nblock_bc(cr, xtot, numbers);
+ nblock_bc(cr->mpi_comm_mygroup, xtot, numbers);
}
if (bVerbose && MASTER(cr))
if (PAR(cr))
{
/* Transfer the number of charges */
- block_bc(cr, nq);
- snew_bc(cr, *x, nq);
- snew_bc(cr, *q, nq);
- nblock_bc(cr, nq, *x);
- nblock_bc(cr, nq, *q);
+ block_bc(cr->mpi_comm_mygroup, nq);
+ snew_bc(MASTER(cr), *x, nq);
+ snew_bc(MASTER(cr), *q, nq);
+ nblock_bc(cr->mpi_comm_mygroup, nq, *x);
+ nblock_bc(cr->mpi_comm_mygroup, nq, *q);
}
return nq;
/* Transfer what we need for parallelizing the reciprocal error estimate */
static void bcast_info(t_inputinfo* info, const t_commrec* cr)
{
- nblock_bc(cr, info->n_entries, info->nkx);
- nblock_bc(cr, info->n_entries, info->nky);
- nblock_bc(cr, info->n_entries, info->nkz);
- nblock_bc(cr, info->n_entries, info->ewald_beta);
- nblock_bc(cr, info->n_entries, info->pme_order);
- nblock_bc(cr, info->n_entries, info->e_dir);
- nblock_bc(cr, info->n_entries, info->e_rec);
- block_bc(cr, info->volume);
- block_bc(cr, info->recipbox);
- block_bc(cr, info->natoms);
- block_bc(cr, info->fracself);
- block_bc(cr, info->bTUNE);
- block_bc(cr, info->q2all);
- block_bc(cr, info->q2allnr);
+ nblock_bc(cr->mpi_comm_mygroup, info->n_entries, info->nkx);
+ nblock_bc(cr->mpi_comm_mygroup, info->n_entries, info->nky);
+ nblock_bc(cr->mpi_comm_mygroup, info->n_entries, info->nkz);
+ nblock_bc(cr->mpi_comm_mygroup, info->n_entries, info->ewald_beta);
+ nblock_bc(cr->mpi_comm_mygroup, info->n_entries, info->pme_order);
+ nblock_bc(cr->mpi_comm_mygroup, info->n_entries, info->e_dir);
+ nblock_bc(cr->mpi_comm_mygroup, info->n_entries, info->e_rec);
+ block_bc(cr->mpi_comm_mygroup, info->volume);
+ block_bc(cr->mpi_comm_mygroup, info->recipbox);
+ block_bc(cr->mpi_comm_mygroup, info->natoms);
+ block_bc(cr->mpi_comm_mygroup, info->fracself);
+ block_bc(cr->mpi_comm_mygroup, info->bTUNE);
+ block_bc(cr->mpi_comm_mygroup, info->q2all);
+ block_bc(cr->mpi_comm_mygroup, info->q2allnr);
}