From: Berk Hess Date: Tue, 5 Oct 2021 15:34:58 +0000 (+0000) Subject: Make AWH bias sharing more flexible X-Git-Url: http://biod.pnpi.spb.ru/gitweb/?a=commitdiff_plain;h=bbcd5f1e6c1ee8d23a8a122c2104ad85efc9888e;p=alexxy%2Fgromacs.git Make AWH bias sharing more flexible --- diff --git a/docs/release-notes/2022/major/features.rst b/docs/release-notes/2022/major/features.rst index 9763b63ab7..e3eb257802 100644 --- a/docs/release-notes/2022/major/features.rst +++ b/docs/release-notes/2022/major/features.rst @@ -41,3 +41,10 @@ A new formulation of soft-core interactions for free energy calculations With this addition Gromacs allows to choose from two schemes to soften non-bonded interactions during alchemical perturbations: Beutler *et al.*\ :ref:`100 ` and Gapsys *et al.*\ :ref:`185 ` soft-core functions. + +More flexible sharing of biases in AWH +"""""""""""""""""""""""""""""""""""""" + +With the accelerated weight histogram method, biases can now be shared between +subsets of all simulations, without restrictions. This allows for more flexible +ensemble simulation setups, as well as simpler launches of sets of simulations. diff --git a/docs/user-guide/mdp-options.rst b/docs/user-guide/mdp-options.rst index e2e0b95688..c9ce3a20a0 100644 --- a/docs/user-guide/mdp-options.rst +++ b/docs/user-guide/mdp-options.rst @@ -2102,8 +2102,8 @@ AWH adaptive biasing :ref:`gmx mdrun` option ``-multidir`` the bias will also be shared across simulations. Sharing may increase convergence initially, although the starting configurations can be critical, especially when sharing between many biases. - Currently, positive group values should start at 1 and increase - by 1 for each subsequent bias that is shared. + Currently, the share value should increase with increasing bias index + (or be 0). .. mdp:: awh1-ndim diff --git a/src/gromacs/applied_forces/awh/awh.cpp b/src/gromacs/applied_forces/awh/awh.cpp index 1455c0776a..e04618af08 100644 --- a/src/gromacs/applied_forces/awh/awh.cpp +++ b/src/gromacs/applied_forces/awh/awh.cpp @@ -173,7 +173,6 @@ Awh::Awh(FILE* fplog, seed_(awhParams.seed()), nstout_(awhParams.nstout()), commRecord_(commRecord), - multiSimRecord_(multiSimRecord), pull_(pull_work), potentialOffset_(0), numFepLambdaStates_(numFepLambdaStates), @@ -204,10 +203,29 @@ Awh::Awh(FILE* fplog, "biases is only supported between simulations")); } - int numSharingSimulations = 1; - if (awhParams.shareBiasMultisim() && isMultiSim(multiSimRecord_)) + if (awhParams.shareBiasMultisim() && multiSimRecord != nullptr) { - numSharingSimulations = multiSimRecord_->numSimulations_; + GMX_RELEASE_ASSERT(commRecord, "Need a valid commRecord"); + biasSharing_ = std::make_unique<BiasSharing>(awhParams, *commRecord, multiSimRecord->mastersComm_); + if (fplog) + { + for (int k = 0; k < awhParams.numBias(); k++) + { + const int shareGroup = awhParams.awhBiasParams()[k].shareGroup(); + if (shareGroup > 0) + { + fprintf(fplog, + "awh%d: bias with share group %d is shared between %d simulations\n", + 1 + k, + shareGroup, + biasSharing_->numSharingSimulations(k)); + } + else + { + fprintf(fplog, "awh%d: bias is not shared between simulations\n", 1 + k); + } + } + } } /* Initialize all the biases */ @@ -254,7 +272,7 @@ Awh::Awh(FILE* fplog, dimParams, beta, inputRecord.delta_t, - numSharingSimulations, + biasSharing_.get(), biasInitFilename, thisRankWillDoIO), pullCoordIndex); @@ -265,7 +283,7 @@ Awh::Awh(FILE* fplog, /* Need to register the AWH coordinates to be allowed to apply forces to the pull coordinates.
*/ registerAwhWithPull(awhParams, pull_); - if (numSharingSimulations > 1 && MASTER(commRecord_)) + if (biasSharing_ && MASTER(commRecord_)) { std::vector pointSize; for (auto const& biasCts : biasCoupledToSystem_) @@ -273,7 +291,7 @@ Awh::Awh(FILE* fplog, pointSize.push_back(biasCts.bias_.state().points().size()); } /* Ensure that the shared biased are compatible between simulations */ - biasesAreCompatibleForSharingBetweenSimulations(awhParams, pointSize, multiSimRecord_); + biasesAreCompatibleForSharingBetweenSimulations(awhParams, pointSize, *biasSharing_); } } @@ -346,8 +364,6 @@ real Awh::applyBiasForcesAndUpdateBias(PbcType pbcType, neighborLambdaDhdl, &biasPotential, &biasPotentialJump, - commRecord_, - multiSimRecord_, t, step, seed_, diff --git a/src/gromacs/applied_forces/awh/awh.h b/src/gromacs/applied_forces/awh/awh.h index 89f358e532..7940118082 100644 --- a/src/gromacs/applied_forces/awh/awh.h +++ b/src/gromacs/applied_forces/awh/awh.h @@ -91,6 +91,7 @@ struct AwhHistory; class AwhParams; class Bias; struct BiasCoupledToSystem; +class BiasSharing; class ForceWithVirial; /*! \libinternal @@ -278,9 +279,10 @@ private: std::vector biasCoupledToSystem_; /**< AWH biases and definitions of their coupling to the system. */ const int64_t seed_; /**< Random seed for MC jumping with umbrella type bias potential. */ const int nstout_; /**< Interval in steps for writing to energy file. */ - const t_commrec* commRecord_; /**< Pointer to the communication record. */ - const gmx_multisim_t* multiSimRecord_; /**< Handler for multi-simulations. */ - pull_t* pull_; /**< Pointer to the pull working data. */ + const t_commrec* commRecord_; /**< Pointer to the communication record. */ + //! Object for sharing bias between simulations, only set when needed + std::unique_ptr biasSharing_; + pull_t* pull_; /**< Pointer to the pull working data. */ double potentialOffset_; /**< The offset of the bias potential which changes due to bias updates. */ const int numFepLambdaStates_; /**< The number of free energy lambda states of the system. */ int fepLambdaState_; /**< The current free energy lambda state. 
*/ diff --git a/src/gromacs/applied_forces/awh/bias.cpp b/src/gromacs/applied_forces/awh/bias.cpp index ea6d4a0795..3caffe06c1 100644 --- a/src/gromacs/applied_forces/awh/bias.cpp +++ b/src/gromacs/applied_forces/awh/bias.cpp @@ -67,6 +67,7 @@ #include "gromacs/utility/gmxassert.h" #include "gromacs/utility/stringutil.h" +#include "biassharing.h" #include "correlationgrid.h" #include "correlationhistory.h" #include "pointstate.h" @@ -107,8 +108,6 @@ gmx::ArrayRef Bias::calcForceAndUpdateBias(const awh_dvec ArrayRef neighborLambdaDhdl, double* awhPotential, double* potentialJump, - const t_commrec* commRecord, - const gmx_multisim_t* ms, double t, int64_t step, int64_t seed, @@ -211,7 +210,7 @@ gmx::ArrayRef Bias::calcForceAndUpdateBias(const awh_dvec if (params_.isUpdateFreeEnergyStep(step)) { state_.updateFreeEnergyAndAddSamplesToHistogram( - dimParams_, grid_, params_, commRecord, ms, t, step, fplog, &updateList_); + dimParams_, grid_, params_, t, step, fplog, &updateList_); if (params_.convolveForce) { @@ -363,7 +362,7 @@ Bias::Bias(int biasIndexInCollection, ArrayRef dimParamsInit, double beta, double mdTimeStep, - int numSharingSimulations, + const BiasSharing* biasSharing, const std::string& biasInitFilename, ThisRankWillDoIO thisRankWillDoIO, BiasParams::DisableUpdateSkips disableUpdateSkips) : @@ -375,13 +374,12 @@ Bias::Bias(int biasIndexInCollection, beta, mdTimeStep, disableUpdateSkips, - numSharingSimulations, + biasSharing ? biasSharing->numSharingSimulations(biasIndexInCollection) : 1, grid_.axis(), biasIndexInCollection), - state_(awhBiasParams, params_.initialHistogramSize, dimParams_, grid_), + state_(awhBiasParams, params_.initialHistogramSize, dimParams_, grid_, biasSharing), thisRankDoesIO_(thisRankWillDoIO == ThisRankWillDoIO::Yes), biasForce_(ndim()), - tempForce_(ndim()), numWarningsIssued_(0) { diff --git a/src/gromacs/applied_forces/awh/bias.h b/src/gromacs/applied_forces/awh/bias.h index 39eeb991b4..7b9545db61 100644 --- a/src/gromacs/applied_forces/awh/bias.h +++ b/src/gromacs/applied_forces/awh/bias.h @@ -65,7 +65,6 @@ #include "biaswriter.h" #include "dimparams.h" -struct gmx_multisim_t; struct t_commrec; struct t_enxsubblock; @@ -79,6 +78,7 @@ class AwhParams; struct AwhPointStateHistory; class CorrelationGrid; class BiasGrid; +class BiasSharing; class GridAxis; class PointState; @@ -157,7 +157,7 @@ public: * \param[in] dimParams Bias dimension parameters. * \param[in] beta 1/(k_B T). * \param[in] mdTimeStep The MD time step. - * \param[in] numSharingSimulations The number of simulations to share the bias across. + * \param[in] biasSharing Pointer to object for sharing bias over simulations, can be nullptr * \param[in] biasInitFilename Name of file to read PMF and target from. * \param[in] thisRankWillDoIO Tells whether this MPI rank will do I/O (checkpointing, AWH output), * normally (only) the master rank does I/O. @@ -169,7 +169,7 @@ public: ArrayRef dimParams, double beta, double mdTimeStep, - int numSharingSimulations, + const BiasSharing* biasSharing, const std::string& biasInitFilename, ThisRankWillDoIO thisRankWillDoIO, BiasParams::DisableUpdateSkips disableUpdateSkips = BiasParams::DisableUpdateSkips::no); @@ -207,8 +207,6 @@ public: * energy lambda state dimensions this can be empty. * \param[out] awhPotential Bias potential. * \param[out] potentialJump Change in bias potential for this bias. - * \param[in] commRecord Struct for intra-simulation communication. - * \param[in] ms Struct for multi-simulation communication. * \param[in] t Time. 
* \param[in] step Time step. * \param[in] seed Random seed. @@ -220,8 +218,6 @@ public: ArrayRef neighborLambdaDhdl, double* awhPotential, double* potentialJump, - const t_commrec* commRecord, - const gmx_multisim_t* ms, double t, int64_t step, int64_t seed, diff --git a/src/gromacs/applied_forces/awh/biassharing.cpp b/src/gromacs/applied_forces/awh/biassharing.cpp index 50d2603778..056451b405 100644 --- a/src/gromacs/applied_forces/awh/biassharing.cpp +++ b/src/gromacs/applied_forces/awh/biassharing.cpp @@ -45,6 +45,10 @@ #include "biassharing.h" +#include "config.h" + +#include +#include #include #include "gromacs/gmxlib/network.h" @@ -53,90 +57,264 @@ #include "gromacs/mdtypes/commrec.h" #include "gromacs/utility/arrayref.h" #include "gromacs/utility/exceptions.h" +#include "gromacs/utility/gmxassert.h" #include "gromacs/utility/stringutil.h" namespace gmx { -bool haveBiasSharingWithinSimulation(const AwhParams& awhParams) +namespace { - bool haveSharing = false; - const auto awhBiasParams = awhParams.awhBiasParams(); - for (auto awhBiasParamIt = awhBiasParams.begin(); awhBiasParamIt != awhBiasParams.end(); ++awhBiasParamIt) +//! Determines and returns which of the local biases are shared with who how many other simulations +std::multiset getGlobalShareIndices(ArrayRef localShareIndices, MPI_Comm simulationMastersComm) +{ +#if GMX_MPI + int numSimulations; + MPI_Comm_size(simulationMastersComm, &numSimulations); + int ourRank; + MPI_Comm_rank(simulationMastersComm, &ourRank); + std::vector biasCountsIn(numSimulations, 0); + std::vector biasCounts(numSimulations, 0); + biasCountsIn[ourRank] = localShareIndices.size(); + MPI_Allreduce(biasCountsIn.data(), biasCounts.data(), numSimulations, MPI_INT, MPI_SUM, simulationMastersComm); + // Now we need to gather the share indices to all (master) ranks. + // We could use MPI_Allgatherv, but thread-MPI does not support that and using + // MPI_Allreduce produces simpler code, so we use that. 
+ int totNumBiases = 0; + int ourOffset = 0; + for (int rank = 0; rank < numSimulations; rank++) { - int shareGroup = awhBiasParamIt->shareGroup(); - if (shareGroup > 0) + if (rank == ourRank) + { + ourOffset = totNumBiases; + } + totNumBiases += biasCounts[rank]; + } + // Fill a buffer with zeros and our part of sharing indices + std::vector shareIndicesAllIn(totNumBiases, 0); + std::copy(localShareIndices.begin(), localShareIndices.end(), shareIndicesAllIn.begin() + ourOffset); + // Gather all sharing indices to all (master) ranks + std::vector shareIndicesAll(totNumBiases); + MPI_Allreduce(shareIndicesAllIn.data(), shareIndicesAll.data(), totNumBiases, MPI_INT, MPI_SUM, simulationMastersComm); +#else + GMX_UNUSED_VALUE(simulationMastersComm); + + ArrayRef shareIndicesAll = localShareIndices; +#endif // GMX_MPI + + std::multiset shareIndicesSet; + for (int shareIndex : shareIndicesAll) + { + if (shareIndex > 0) + { + shareIndicesSet.insert(shareIndex); + } + } + + return shareIndicesSet; +} + +} // namespace + +BiasSharing::BiasSharing(const AwhParams& awhParams, const t_commrec& commRecord, MPI_Comm simulationMastersComm) : + commRecord_(commRecord) +{ + if (MASTER(&commRecord)) + { + std::vector localShareIndices; + int shareGroupPrev = 0; + for (int k = 0; k < awhParams.numBias(); k++) { - for (auto awhBiasParamIt2 = awhBiasParamIt + 1; awhBiasParamIt2 != awhBiasParams.end(); - ++awhBiasParamIt2) + const int shareGroup = awhParams.awhBiasParams()[k].shareGroup(); + GMX_RELEASE_ASSERT(shareGroup >= 0, "Bias share group values should be >= 0"); + localShareIndices.push_back(shareGroup); + if (shareGroup > 0) { - if (awhBiasParamIt2->shareGroup() == shareGroup) + if (shareGroup <= shareGroupPrev) { - haveSharing = true; + GMX_THROW( + InvalidInputError("AWH biases that are shared should use increasing " + "share-group values")); } + shareGroupPrev = shareGroup; } } - } + std::multiset globalShareIndices = + getGlobalShareIndices(localShareIndices, simulationMastersComm); - return haveSharing; -} + int numSimulations = 1; +#if GMX_MPI + MPI_Comm_size(simulationMastersComm, &numSimulations); + int myRank; + MPI_Comm_rank(simulationMastersComm, &myRank); +#endif // GMX_MPI -void biasesAreCompatibleForSharingBetweenSimulations(const AwhParams& awhParams, - ArrayRef pointSize, - const gmx_multisim_t* multiSimComm) -{ - const int numSim = multiSimComm->numSimulations_; + numSharingSimulations_.resize(awhParams.numBias(), 1); + sharingSimulationIndices_.resize(awhParams.numBias(), 0); + multiSimCommPerBias_.resize(awhParams.numBias(), MPI_COMM_NULL); - /* We currently enforce subsequent shared biases to have consecutive - * share-group values starting at 1. This means we can reduce shared - * biases in order over the ranks and it does not restrict possibilities. - */ - int numShare = 0; - for (const auto& awhBiasParam : awhParams.awhBiasParams()) - { - int group = awhBiasParam.shareGroup(); - if (group > 0) + for (int shareIndex : globalShareIndices) { - numShare++; - if (group != numShare) + if (globalShareIndices.count(shareIndex) > 1) { - GMX_THROW( - InvalidInputError("AWH biases that are shared should use consequetive " - "share-group values starting at 1")); + const auto& findBiasIndex = + std::find(localShareIndices.begin(), localShareIndices.end(), shareIndex); + const index localBiasIndex = (findBiasIndex == localShareIndices.end() + ? 
-1 + : findBiasIndex - localShareIndices.begin()); + MPI_Comm splitComm; + if (static_cast(globalShareIndices.count(shareIndex)) == numSimulations) + { + splitComm = simulationMastersComm; + } + else + { +#if GMX_MPI + const int haveLocally = (localBiasIndex >= 0 ? 1 : 0); + MPI_Comm_split(simulationMastersComm, haveLocally, myRank, &splitComm); + createdCommList_.push_back(splitComm); +#else + GMX_RELEASE_ASSERT(false, "Can not have sharing without MPI"); +#endif // GMX_MPI + } + if (localBiasIndex >= 0) + { + numSharingSimulations_[localBiasIndex] = globalShareIndices.count(shareIndex); +#if GMX_MPI + MPI_Comm_rank(splitComm, &sharingSimulationIndices_[localBiasIndex]); +#endif // GMX_MPI + multiSimCommPerBias_[localBiasIndex] = splitComm; + } } } } - std::vector numShareAll(numSim); - numShareAll[multiSimComm->simulationIndex_] = numShare; - gmx_sumi_sim(numShareAll.size(), numShareAll.data(), multiSimComm); - for (int sim = 1; sim < numSim; sim++) + +#if GMX_MPI + if (commRecord.nnodes > 1) { - if (numShareAll[sim] != numShareAll[0]) - { - GMX_THROW(InvalidInputError( - "Different simulations attempt to share different number of biases")); - } + numSharingSimulations_.resize(awhParams.numBias()); + MPI_Bcast( + numSharingSimulations_.data(), numSharingSimulations_.size(), MPI_INT, 0, commRecord.mpi_comm_mygroup); } +#endif // GMX_MPI +} - std::vector intervals(numSim * 2); - intervals[numSim * 0 + multiSimComm->simulationIndex_] = awhParams.nstSampleCoord(); - intervals[numSim * 1 + multiSimComm->simulationIndex_] = awhParams.numSamplesUpdateFreeEnergy(); - gmx_sumi_sim(intervals.size(), intervals.data(), multiSimComm); - for (int sim = 1; sim < numSim; sim++) +BiasSharing::~BiasSharing() +{ +#if GMX_MPI + for (MPI_Comm comm : createdCommList_) { - if (intervals[sim] != intervals[0]) - { - GMX_THROW( - InvalidInputError("All simulations should have the same AWH sample interval")); - } - if (intervals[numSim + sim] != intervals[numSim]) + MPI_Comm_free(&comm); + } +#endif // GMX_MPI +} + +namespace +{ + +#if GMX_MPI + +template +std::enable_if_t, MPI_Datatype> mpiType() +{ + return MPI_INT; +} + +template +std::enable_if_t, MPI_Datatype> mpiType() +{ + return MPI_LONG; +} + +template +std::enable_if_t, MPI_Datatype> mpiType() +{ + return MPI_DOUBLE; +} + +#endif // GMX_MPI + +} // namespace + +/*! \brief + * Sum an array over all simulations on master ranks or all ranks of each simulation. + * + * This assumes the data is identical on all ranks within each simulation. + * + * \param[in,out] data The data to sum. + * \param[in] multiSimComm Communicator for the master ranks of sharing simulations. + * \param[in] broadcastWithinSimulation Broadcast the result to all ranks within the simulation + * \param[in] commRecord Struct for intra-simulation communication. 
+ */ +template +void sumOverSimulations(ArrayRef data, + const MPI_Comm multiSimComm, + const bool broadcastWithinSimulation, + const t_commrec& commRecord) +{ +#if GMX_MPI + if (MASTER(&commRecord)) + { + MPI_Allreduce(MPI_IN_PLACE, data.data(), data.size(), mpiType(), MPI_SUM, multiSimComm); + } + if (broadcastWithinSimulation && commRecord.nnodes > 1) + { + gmx_bcast(data.size() * sizeof(T), data.data(), commRecord.mpi_comm_mygroup); + } +#else + GMX_UNUSED_VALUE(data); + GMX_UNUSED_VALUE(commRecord); + GMX_UNUSED_VALUE(multiSimComm); +#endif // GMX_MPI +} + +void BiasSharing::sumOverMasterRanks(ArrayRef data, const int biasIndex) const +{ + sumOverSimulations(data, multiSimCommPerBias_[biasIndex], false, commRecord_); +} + +void BiasSharing::sumOverMasterRanks(ArrayRef data, const int biasIndex) const +{ + sumOverSimulations(data, multiSimCommPerBias_[biasIndex], false, commRecord_); +} + +void BiasSharing::sum(ArrayRef data, const int biasIndex) const +{ + sumOverSimulations(data, multiSimCommPerBias_[biasIndex], true, commRecord_); +} + +void BiasSharing::sum(ArrayRef data, const int biasIndex) const +{ + sumOverSimulations(data, multiSimCommPerBias_[biasIndex], true, commRecord_); +} + +bool haveBiasSharingWithinSimulation(const AwhParams& awhParams) +{ + bool haveSharing = false; + + for (int k = 0; k < awhParams.numBias(); k++) + { + int shareGroup = awhParams.awhBiasParams()[k].shareGroup(); + if (shareGroup > 0) { - GMX_THROW(InvalidInputError( - "All simulations should have the same AWH free-energy update interval")); + for (int i = k + 1; i < awhParams.numBias(); i++) + { + if (awhParams.awhBiasParams()[i].shareGroup() == shareGroup) + { + haveSharing = true; + } + } } } + return haveSharing; +} + +void biasesAreCompatibleForSharingBetweenSimulations(const AwhParams& awhParams, + ArrayRef pointSize, + const BiasSharing& biasSharing) +{ /* Check the point sizes. This is a sufficient condition for running * as shared multi-sim run. No physics checks are performed here. 
*/ @@ -145,9 +323,35 @@ void biasesAreCompatibleForSharingBetweenSimulations(const AwhParams& awhP { if (awhBiasParams[b].shareGroup() > 0) { - std::vector pointSizes(numSim); - pointSizes[multiSimComm->simulationIndex_] = pointSize[b]; - gmx_sumli_sim(pointSizes.size(), pointSizes.data(), multiSimComm); + const int numSim = biasSharing.numSharingSimulations(b); + if (numSim == 1) + { + // This bias is not actually shared + continue; + } + const int simIndex = biasSharing.sharingSimulationIndex(b); + std::vector intervals(numSim * 2); + intervals[numSim * 0 + simIndex] = awhParams.nstSampleCoord(); + intervals[numSim * 1 + simIndex] = awhParams.numSamplesUpdateFreeEnergy(); + biasSharing.sumOverMasterRanks(intervals, b); + for (int sim = 1; sim < numSim; sim++) + { + if (intervals[sim] != intervals[0]) + { + GMX_THROW(InvalidInputError( + "All simulations should have the same AWH sample interval")); + } + if (intervals[numSim + sim] != intervals[numSim]) + { + GMX_THROW( + InvalidInputError("All simulations should have the same AWH " + "free-energy update interval")); + } + } + + std::vector pointSizes(numSim); + pointSizes[simIndex] = pointSize[b]; + biasSharing.sumOverMasterRanks(pointSizes, b); for (int sim = 1; sim < numSim; sim++) { if (pointSizes[sim] != pointSizes[0]) diff --git a/src/gromacs/applied_forces/awh/biassharing.h b/src/gromacs/applied_forces/awh/biassharing.h index 1d06d0ec69..e73ec5118a 100644 --- a/src/gromacs/applied_forces/awh/biassharing.h +++ b/src/gromacs/applied_forces/awh/biassharing.h @@ -49,9 +49,15 @@ #include +#include #include +#include "gromacs/utility/arrayref.h" +#include "gromacs/utility/classhelpers.h" +#include "gromacs/utility/gmxmpi.h" + struct gmx_multisim_t; +struct t_commrec; namespace gmx { @@ -60,13 +66,60 @@ template class ArrayRef; class AwhParams; +class BiasSharing +{ +public: + /*! \brief Constructor + * + * \param[in] awhParams Parameters for all biases in this simulation + * \param[in] commRecord Intra-simulation communication record + * \param[in] simulationMastersComm MPI communicator for all master ranks of all simulations that share this bias + */ + BiasSharing(const AwhParams& awhParams, const t_commrec& commRecord, MPI_Comm simulationMastersComm); + + ~BiasSharing(); + + //! Returns the number of simulations sharing bias \p biasIndex + int numSharingSimulations(int biasIndex) const { return numSharingSimulations_[biasIndex]; } + + //! Returns the index of our simulation in the simulations sharing bias \p biasIndex + int sharingSimulationIndex(int biasIndex) const { return sharingSimulationIndices_[biasIndex]; } + + //! Sums data over the master ranks of all simulations sharing bias \p biasIndex + void sumOverMasterRanks(ArrayRef data, int biasIndex) const; + + //! Sums data over the master ranks of all simulations sharing bias \p biasIndex + void sumOverMasterRanks(ArrayRef data, int biasIndex) const; + + //! Sums data over all simulations sharing bias \p biasIndex and broadcasts to all ranks within the simulation + void sum(ArrayRef data, int biasIndex) const; + + //! Sums data over all simulations sharing bias \p biasIndex and broadcasts to all ranks within the simulation + void sum(ArrayRef data, int biasIndex) const; + +private: + //! The number of simulations sharing for each bias + std::vector numSharingSimulations_; + //! The index of our simulations in the simulations for each bias + std::vector sharingSimulationIndices_; + //! Reference to the intra-simulation communication record + const t_commrec& commRecord_; + + //! 
Communicator between master ranks sharing a bias, for each bias + std::vector multiSimCommPerBias_; + //! List of MPI communicators created by this object so we can destroy them on distruction + std::vector createdCommList_; + + GMX_DISALLOW_COPY_AND_ASSIGN(BiasSharing); +}; + /*! \brief Returns if any bias is sharing within a simulation. * * \param[in] awhParams The AWH parameters. */ bool haveBiasSharingWithinSimulation(const AwhParams& awhParams); -/*! \brief Checks if biases are compatible for sharing between simulations, throws if not. +/*! \brief Checks whether biases are compatible for sharing between simulations, throws when not. * * Should be called simultaneously on the master rank of every simulation. * Note that this only checks for technical compatibility. It is up to @@ -75,11 +128,11 @@ bool haveBiasSharingWithinSimulation(const AwhParams& awhParams); * * \param[in] awhParams The AWH parameters. * \param[in] pointSize Vector of grid-point sizes for each bias. - * \param[in] multiSimComm Struct for multi-simulation communication. + * \param[in] biasSharing Object for communication for sharing bias data over simulations. */ void biasesAreCompatibleForSharingBetweenSimulations(const AwhParams& awhParams, ArrayRef pointSize, - const gmx_multisim_t* multiSimComm); + const BiasSharing& biasSharing); } // namespace gmx diff --git a/src/gromacs/applied_forces/awh/biasstate.cpp b/src/gromacs/applied_forces/awh/biasstate.cpp index 55442b10fd..3a43f29db1 100644 --- a/src/gromacs/applied_forces/awh/biasstate.cpp +++ b/src/gromacs/applied_forces/awh/biasstate.cpp @@ -74,6 +74,7 @@ #include "gromacs/utility/stringutil.h" #include "biasgrid.h" +#include "biassharing.h" #include "pointstate.h" namespace gmx @@ -95,70 +96,24 @@ void BiasState::getPmf(gmx::ArrayRef pmf) const namespace { -/*! \brief - * Sum an array over all simulations on the master rank of each simulation. - * - * \param[in,out] arrayRef The data to sum. - * \param[in] multiSimComm Struct for multi-simulation communication. - */ -void sumOverSimulations(gmx::ArrayRef arrayRef, const gmx_multisim_t* multiSimComm) -{ - gmx_sumi_sim(arrayRef.size(), arrayRef.data(), multiSimComm); -} - -/*! \brief - * Sum an array over all simulations on the master rank of each simulation. - * - * \param[in,out] arrayRef The data to sum. - * \param[in] multiSimComm Struct for multi-simulation communication. - */ -void sumOverSimulations(gmx::ArrayRef arrayRef, const gmx_multisim_t* multiSimComm) -{ - gmx_sumd_sim(arrayRef.size(), arrayRef.data(), multiSimComm); -} - -/*! \brief - * Sum an array over all simulations on all ranks of each simulation. - * - * This assumes the data is identical on all ranks within each simulation. - * - * \param[in,out] arrayRef The data to sum. - * \param[in] commRecord Struct for intra-simulation communication. - * \param[in] multiSimComm Struct for multi-simulation communication. - */ -template -void sumOverSimulations(gmx::ArrayRef arrayRef, const t_commrec* commRecord, const gmx_multisim_t* multiSimComm) -{ - if (MASTER(commRecord)) - { - sumOverSimulations(arrayRef, multiSimComm); - } - if (commRecord->nnodes > 1) - { - gmx_bcast(arrayRef.size() * sizeof(T), arrayRef.data(), commRecord->mpi_comm_mygroup); - } -} - /*! \brief * Sum PMF over multiple simulations, when requested. * * \param[in,out] pointState The state of the points in the bias. * \param[in] numSharedUpdate The number of biases sharing the histogram. - * \param[in] commRecord Struct for intra-simulation communication. 
- * \param[in] multiSimComm Struct for multi-simulation communication. + * \param[in] biasSharing Object for sharing bias data over multiple simulations + * \param[in] biasIndex Index of this bias in the total list of biases in this simulation */ -void sumPmf(gmx::ArrayRef pointState, - int numSharedUpdate, - const t_commrec* commRecord, - const gmx_multisim_t* multiSimComm) +void sumPmf(gmx::ArrayRef pointState, int numSharedUpdate, const BiasSharing* biasSharing, const int biasIndex) { if (numSharedUpdate == 1) { return; } - GMX_ASSERT(multiSimComm != nullptr && numSharedUpdate % multiSimComm->numSimulations_ == 0, + GMX_ASSERT(biasSharing != nullptr + && numSharedUpdate % biasSharing->numSharingSimulations(biasIndex) == 0, "numSharedUpdate should be a multiple of multiSimComm->numSimulations_"); - GMX_ASSERT(numSharedUpdate == multiSimComm->numSimulations_, + GMX_ASSERT(numSharedUpdate == biasSharing->numSharingSimulations(biasIndex), "Sharing within a simulation is not implemented (yet)"); std::vector buffer(pointState.size()); @@ -169,7 +124,7 @@ void sumPmf(gmx::ArrayRef pointState, buffer[i] = pointState[i].inTargetRegion() ? std::exp(pointState[i].logPmfSum()) : 0; } - sumOverSimulations(gmx::ArrayRef(buffer), commRecord, multiSimComm); + biasSharing->sum(gmx::ArrayRef(buffer), biasIndex); /* Take log again to get (non-normalized) PMF */ double normFac = 1.0 / numSharedUpdate; @@ -697,13 +652,13 @@ namespace * * \param[in,out] updateList Update list for this simulation (assumed >= npoints long). * \param[in] numPoints Total number of points. - * \param[in] commRecord Struct for intra-simulation communication. - * \param[in] multiSimComm Struct for multi-simulation communication. + * \param[in] biasSharing Object for sharing bias data over multiple simulations + * \param[in] biasIndex Index of this bias in the total list of biases in this simulation */ -void mergeSharedUpdateLists(std::vector* updateList, - int numPoints, - const t_commrec* commRecord, - const gmx_multisim_t* multiSimComm) +void mergeSharedUpdateLists(std::vector* updateList, + int numPoints, + const BiasSharing& biasSharing, + const int biasIndex) { std::vector numUpdatesOfPoint; @@ -716,7 +671,7 @@ void mergeSharedUpdateLists(std::vector* updateList, } /* Sum over the sims to get all the flagged points */ - sumOverSimulations(arrayRefFromArray(numUpdatesOfPoint.data(), numPoints), commRecord, multiSimComm); + biasSharing.sum(arrayRefFromArray(numUpdatesOfPoint.data(), numPoints), biasIndex); /* Collect the indices of the flagged points in place. The resulting array will be the merged update list.*/ updateList->clear(); @@ -796,15 +751,15 @@ namespace * \param[in,out] pointState The state of the points in the bias. * \param[in,out] weightSumCovering The weights for checking covering. * \param[in] numSharedUpdate The number of biases sharing the histrogram. - * \param[in] commRecord Struct for intra-simulation communication. - * \param[in] multiSimComm Struct for multi-simulation communication. - * \param[in] localUpdateList List of points with data. + * \param[in] biasSharing Object for sharing bias data over multiple simulations + * \param[in] biasIndex Index of this bias in the total list of biases in this + * simulation \param[in] localUpdateList List of points with data. 
*/ void sumHistograms(gmx::ArrayRef pointState, gmx::ArrayRef weightSumCovering, int numSharedUpdate, - const t_commrec* commRecord, - const gmx_multisim_t* multiSimComm, + const BiasSharing* biasSharing, + const int biasIndex, const std::vector& localUpdateList) { /* The covering checking histograms are added before summing over simulations, so that the @@ -817,7 +772,7 @@ void sumHistograms(gmx::ArrayRef pointState, /* Sum histograms over multiple simulations if needed. */ if (numSharedUpdate > 1) { - GMX_ASSERT(numSharedUpdate == multiSimComm->numSimulations_, + GMX_ASSERT(numSharedUpdate == biasSharing->numSharingSimulations(biasIndex), "Sharing within a simulation is not implemented (yet)"); /* Collect the weights and counts in linear arrays to be able to use gmx_sumd_sim. */ @@ -835,8 +790,8 @@ void sumHistograms(gmx::ArrayRef pointState, coordVisits[localIndex] = ps.numVisitsIteration(); } - sumOverSimulations(gmx::ArrayRef(weightSum), commRecord, multiSimComm); - sumOverSimulations(gmx::ArrayRef(coordVisits), commRecord, multiSimComm); + biasSharing->sum(gmx::ArrayRef(weightSum), biasIndex); + biasSharing->sum(gmx::ArrayRef(coordVisits), biasIndex); /* Transfer back the result */ for (size_t localIndex = 0; localIndex < localUpdateList.size(); localIndex++) @@ -964,9 +919,7 @@ void labelCoveredPoints(const std::vector& visited, bool BiasState::isSamplingRegionCovered(const BiasParams& params, ArrayRef dimParams, - const BiasGrid& grid, - const t_commrec* commRecord, - const gmx_multisim_t* multiSimComm) const + const BiasGrid& grid) const { /* Allocate and initialize arrays: one for checking visits along each dimension, one for keeping track of which points to check and one for the covered points. @@ -1072,10 +1025,8 @@ bool BiasState::isSamplingRegionCovered(const BiasParams& params, /* For multiple dimensions this may not be the best way to do it. */ for (int d = 0; d < grid.numDimensions(); d++) { - sumOverSimulations( - gmx::arrayRefFromArray(checkDim[d].covered.data(), grid.axis(d).numPoints()), - commRecord, - multiSimComm); + biasSharing_->sum(gmx::arrayRefFromArray(checkDim[d].covered.data(), grid.axis(d).numPoints()), + params.biasIndex); } } @@ -1110,8 +1061,6 @@ static void normalizeFreeEnergyAndPmfSum(std::vector* pointState) void BiasState::updateFreeEnergyAndAddSamplesToHistogram(ArrayRef dimParams, const BiasGrid& grid, const BiasParams& params, - const t_commrec* commRecord, - const gmx_multisim_t* multiSimComm, double t, int64_t step, FILE* fplog, @@ -1130,16 +1079,16 @@ void BiasState::updateFreeEnergyAndAddSamplesToHistogram(ArrayRef 1) { - mergeSharedUpdateLists(updateList, points_.size(), commRecord, multiSimComm); + mergeSharedUpdateLists(updateList, points_.size(), *biasSharing_, params.biasIndex); } /* Reset the range for the next update */ resetLocalUpdateRange(grid); /* Add samples to histograms for all local points and sync simulations if needed */ - sumHistograms(points_, weightSumCovering_, params.numSharedUpdate, commRecord, multiSimComm, *updateList); + sumHistograms(points_, weightSumCovering_, params.numSharedUpdate, biasSharing_, params.biasIndex, *updateList); - sumPmf(points_, params.numSharedUpdate, commRecord, multiSimComm); + sumPmf(points_, params.numSharedUpdate, biasSharing_, params.biasIndex); /* Renormalize the free energy if values are too large. 
*/ bool needToNormalizeFreeEnergy = false; @@ -1166,8 +1115,7 @@ void BiasState::updateFreeEnergyAndAddSamplesToHistogram(ArrayRef dimParams, - const BiasGrid& grid) : + const BiasGrid& grid, + const BiasSharing* biasSharing) : coordState_(awhBiasParams, dimParams, grid), points_(grid.numPoints()), weightSumCovering_(grid.numPoints()), - histogramSize_(awhBiasParams, histogramSizeInitial) + histogramSize_(awhBiasParams, histogramSizeInitial), + biasSharing_(biasSharing) { /* The minimum and maximum multidimensional point indices that are affected by the next update */ for (size_t d = 0; d < dimParams.size(); d++) diff --git a/src/gromacs/applied_forces/awh/biasstate.h b/src/gromacs/applied_forces/awh/biasstate.h index 859f524af6..87a6b039ea 100644 --- a/src/gromacs/applied_forces/awh/biasstate.h +++ b/src/gromacs/applied_forces/awh/biasstate.h @@ -66,7 +66,6 @@ #include "dimparams.h" #include "histogramsize.h" -struct gmx_multisim_t; struct t_commrec; namespace gmx @@ -77,6 +76,7 @@ class ArrayRef; struct AwhBiasHistory; class BiasParams; class BiasGrid; +class BiasSharing; class GridAxis; class PointState; @@ -105,11 +105,13 @@ public: * entries and grow by a floating-point scaling factor. * \param[in] dimParams The dimension parameters. * \param[in] grid The bias grid. + * \param[in] biasSharing Multisim bias sharing object, can be nullptrx */ BiasState(const AwhBiasParams& awhBiasParams, double histogramSizeInitial, ArrayRef dimParams, - const BiasGrid& grid); + const BiasGrid& grid, + const BiasSharing* biasSharing); /*! \brief * Restore the bias state from history. @@ -356,15 +358,11 @@ private: * \param[in] params The bias parameters. * \param[in] dimParams Bias dimension parameters. * \param[in] grid The grid. - * \param[in] commRecord Struct for intra-simulation communication. - * \param[in] multiSimComm Struct for multi-simulation communication. * \returns true if covered. */ bool isSamplingRegionCovered(const BiasParams& params, ArrayRef dimParams, - const BiasGrid& grid, - const t_commrec* commRecord, - const gmx_multisim_t* multiSimComm) const; + const BiasGrid& grid) const; /*! \brief * Return the new reference weight histogram size for the current update. @@ -408,8 +406,6 @@ public: * \param[in] dimParams The dimension parameters. * \param[in] grid The grid. * \param[in] params The bias parameters. - * \param[in] commRecord Struct for intra-simulation communication. - * \param[in] ms Struct for multi-simulation communication. * \param[in] t Time. * \param[in] step Time step. * \param[in,out] fplog Log file. @@ -418,8 +414,6 @@ public: void updateFreeEnergyAndAddSamplesToHistogram(ArrayRef dimParams, const BiasGrid& grid, const BiasParams& params, - const t_commrec* commRecord, - const gmx_multisim_t* ms, double t, int64_t step, FILE* fplog, @@ -545,6 +539,9 @@ private: /* Track the part of the grid sampled since the last update. */ awh_ivec originUpdatelist_; /**< The origin of the rectangular region that has been sampled since last update. */ awh_ivec endUpdatelist_; /**< The end of the rectangular region that has been sampled since last update. */ + + //! Object for sharing biases over multiple simulations, can be nullptr + const BiasSharing* biasSharing_; }; //! 
Linewidth used for warning output diff --git a/src/gromacs/applied_forces/awh/tests/CMakeLists.txt b/src/gromacs/applied_forces/awh/tests/CMakeLists.txt index 864dcabab8..2ea825ea59 100644 --- a/src/gromacs/applied_forces/awh/tests/CMakeLists.txt +++ b/src/gromacs/applied_forces/awh/tests/CMakeLists.txt @@ -37,6 +37,7 @@ gmx_add_unit_test(AwhTest awh-test awh_setup.cpp bias.cpp biasgrid.cpp + biassharing.cpp biasstate.cpp bias_fep_lambda_state.cpp ) diff --git a/src/gromacs/applied_forces/awh/tests/awh_setup.cpp b/src/gromacs/applied_forces/awh/tests/awh_setup.cpp index edb67b5e51..4819171015 100644 --- a/src/gromacs/applied_forces/awh/tests/awh_setup.cpp +++ b/src/gromacs/applied_forces/awh/tests/awh_setup.cpp @@ -103,12 +103,14 @@ std::vector awhDimParamSerialized(AwhCoordinateProviderType inputCoordinat * \param[in] beta Value for 1/(kB*T). * \param[in] inputErrorScaling Factor for initial error scaling. * \param[in] dimensionParameterBuffers Buffers containing the dimension parameters. + * \param[in] shareGroup share group for, potentially, sharing the bias between simulations * \param[in] inputUserData If there is a user provided PMF estimate. */ static std::vector awhBiasParamSerialized(AwhHistogramGrowthType eawhgrowth, double beta, double inputErrorScaling, ArrayRef> dimensionParameterBuffers, + int shareGroup, bool inputUserData) { int ndim = dimensionParameterBuffers.size(); @@ -118,7 +120,6 @@ static std::vector awhBiasParamSerialized(AwhHistogramGrowthType AwhHistogramGrowthType eGrowth = eawhgrowth; bool bUserData = inputUserData; double errorInitial = inputErrorScaling / beta; - int shareGroup = 0; bool equilibrateHistogram = false; gmx::InMemorySerializer serializer; @@ -151,6 +152,7 @@ static std::vector awhBiasParamSerialized(AwhHistogramGrowthType * \param[in] inputErrorScaling Factor for initial error scaling. * \param[in] inputSeed Seed value to use. * \param[in] dimensionParameterBuffers Buffers containing the dimension parameters. + * \param[in] biasShareGroup share group for, potentially, sharing the bias over simulations * \param[in] inputUserData If there is a user provided PMF estimate. 
*/ static std::vector awhParamSerialized(AwhHistogramGrowthType eawhgrowth, @@ -159,6 +161,7 @@ static std::vector awhParamSerialized(AwhHistogramGrowthType ea double inputErrorScaling, int64_t inputSeed, ArrayRef> dimensionParameterBuffers, + int biasShareGroup, bool inputUserData) { int numBias = 1; @@ -180,7 +183,7 @@ static std::vector awhParamSerialized(AwhHistogramGrowthType ea auto awhParamBuffer = serializer.finishAndGetBuffer(); auto awhBiasBuffer = awhBiasParamSerialized( - eawhgrowth, beta, inputErrorScaling, dimensionParameterBuffers, inputUserData); + eawhgrowth, beta, inputErrorScaling, dimensionParameterBuffers, biasShareGroup, inputUserData); awhParamBuffer.insert(awhParamBuffer.end(), awhBiasBuffer.begin(), awhBiasBuffer.end()); @@ -200,14 +203,15 @@ AwhTestParameters getAwhTestParameters(AwhHistogramGrowthType eawhgro double beta, bool useAwhFep, double inputErrorScaling, - int numFepLambdaStates) + int numFepLambdaStates, + int biasShareGroup) { double convFactor = 1; double k = 1000; int64_t seed = 93471803; auto awhParamBuffer = awhParamSerialized( - eawhgrowth, eawhpotential, beta, inputErrorScaling, seed, dimensionParameterBuffers, inputUserData); + eawhgrowth, eawhpotential, beta, inputErrorScaling, seed, dimensionParameterBuffers, biasShareGroup, inputUserData); gmx::InMemoryDeserializer deserializer(awhParamBuffer, false); AwhTestParameters params(&deserializer); @@ -249,7 +253,7 @@ TEST(SerializationTest, CanSerializeBiasParams) auto awhDimBuffer = awhDimParamSerialized(); auto awhDimArrayRef = gmx::arrayRefFromArray(&awhDimBuffer, 1); auto awhBiasBuffer = awhBiasParamSerialized( - AwhHistogramGrowthType::ExponentialLinear, 0.4, 0.5, awhDimArrayRef, false); + AwhHistogramGrowthType::ExponentialLinear, 0.4, 0.5, awhDimArrayRef, 0, false); gmx::InMemoryDeserializer deserializer(awhBiasBuffer, false); AwhBiasParams awhBiasParams(&deserializer); EXPECT_EQ(awhBiasParams.ndim(), 1); @@ -281,8 +285,14 @@ TEST(SerializationTest, CanSerializeAwhParams) { auto awhDimBuffer = awhDimParamSerialized(); auto awhDimArrayRef = gmx::arrayRefFromArray(&awhDimBuffer, 1); - auto awhParamBuffer = awhParamSerialized( - AwhHistogramGrowthType::ExponentialLinear, AwhPotentialType::Convolved, 0.4, 0.5, 1337, awhDimArrayRef, false); + auto awhParamBuffer = awhParamSerialized(AwhHistogramGrowthType::ExponentialLinear, + AwhPotentialType::Convolved, + 0.4, + 0.5, + 1337, + awhDimArrayRef, + 0, + false); gmx::InMemoryDeserializer deserializer(awhParamBuffer, false); AwhParams awhParams(&deserializer); EXPECT_EQ(awhParams.numBias(), 1); diff --git a/src/gromacs/applied_forces/awh/tests/awh_setup.h b/src/gromacs/applied_forces/awh/tests/awh_setup.h index 6e2c1f553d..ee6cb618fd 100644 --- a/src/gromacs/applied_forces/awh/tests/awh_setup.h +++ b/src/gromacs/applied_forces/awh/tests/awh_setup.h @@ -96,7 +96,8 @@ AwhTestParameters getAwhTestParameters(AwhHistogramGrowthType eawhgro double beta, bool useAwhFep, double inputErrorScaling, - int numFepLambdaStates); + int numFepLambdaStates, + int biasShareGroup = 0); } // namespace test } // namespace gmx diff --git a/src/gromacs/applied_forces/awh/tests/bias.cpp b/src/gromacs/applied_forces/awh/tests/bias.cpp index 99e5ea4e59..1b810fb569 100644 --- a/src/gromacs/applied_forces/awh/tests/bias.cpp +++ b/src/gromacs/applied_forces/awh/tests/bias.cpp @@ -134,7 +134,7 @@ public: params_->dimParams, params_->beta, mdTimeStep, - 1, + nullptr, "", Bias::ThisRankWillDoIO::No, disableUpdateSkips); @@ -170,7 +170,7 @@ TEST_P(BiasTest, ForcesBiasPmf) awh_dvec 
coordValue = { coord, 0, 0, 0 }; double potential = 0; gmx::ArrayRef biasForce = bias.calcForceAndUpdateBias( - coordValue, {}, {}, &potential, &potentialJump, nullptr, nullptr, step, step, seed_, nullptr); + coordValue, {}, {}, &potential, &potentialJump, step, step, seed_, nullptr); force.push_back(biasForce[0]); pot.push_back(potential); @@ -249,7 +249,7 @@ TEST(BiasTest, DetectsCovering) params.dimParams, params.beta, mdTimeStep, - 1, + nullptr, "", Bias::ThisRankWillDoIO::No); @@ -272,17 +272,8 @@ TEST(BiasTest, DetectsCovering) awh_dvec coordValue = { coord, 0, 0, 0 }; double potential = 0; double potentialJump = 0; - bias.calcForceAndUpdateBias(coordValue, - {}, - {}, - &potential, - &potentialJump, - nullptr, - nullptr, - step, - step, - params.awhParams.seed(), - nullptr); + bias.calcForceAndUpdateBias( + coordValue, {}, {}, &potential, &potentialJump, step, step, params.awhParams.seed(), nullptr); inInitialStage = bias.state().inInitialStage(); if (!inInitialStage) diff --git a/src/gromacs/applied_forces/awh/tests/bias_fep_lambda_state.cpp b/src/gromacs/applied_forces/awh/tests/bias_fep_lambda_state.cpp index fd728f99ec..e8eed23aca 100644 --- a/src/gromacs/applied_forces/awh/tests/bias_fep_lambda_state.cpp +++ b/src/gromacs/applied_forces/awh/tests/bias_fep_lambda_state.cpp @@ -127,7 +127,7 @@ public: params_->dimParams, params_->beta, mdTimeStep, - 1, + nullptr, "", Bias::ThisRankWillDoIO::No, disableUpdateSkips); @@ -177,8 +177,6 @@ TEST_P(BiasFepLambdaStateTest, ForcesBiasPmf) neighborLambdaDhdl, &potential, &potentialJump, - nullptr, - nullptr, step * mdTimeStep, step, seed_, @@ -256,7 +254,7 @@ TEST(BiasFepLambdaStateTest, DetectsCovering) params.dimParams, params.beta, mdTimeStep, - 1, + nullptr, "", Bias::ThisRankWillDoIO::No); @@ -288,8 +286,6 @@ TEST(BiasFepLambdaStateTest, DetectsCovering) neighborLambdaDhdl, &potential, &potentialJump, - nullptr, - nullptr, step, step, params.awhParams.seed(), diff --git a/src/gromacs/applied_forces/awh/tests/biassharing.cpp b/src/gromacs/applied_forces/awh/tests/biassharing.cpp new file mode 100644 index 0000000000..fe1e649eb0 --- /dev/null +++ b/src/gromacs/applied_forces/awh/tests/biassharing.cpp @@ -0,0 +1,128 @@ +/* + * This file is part of the GROMACS molecular simulation package. + * + * Copyright (c) 2021, by the GROMACS development team, led by + * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl, + * and including many others, as listed in the AUTHORS file in the + * top-level source directory and at http://www.gromacs.org. + * + * GROMACS is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * GROMACS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with GROMACS; if not, see + * http://www.gnu.org/licenses, or write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * If you want to redistribute modifications to GROMACS, please + * consider that scientific software is very special. Version + * control is crucial - bugs must be traceable. 
We will be happy to + * consider code for inclusion in the official distribution, but + * derived work must not be called official GROMACS. Details are found + * in the README & COPYING files - if they are missing, get the + * official version at http://www.gromacs.org. + * + * To help us fund GROMACS development, we humbly ask that you cite + * the research papers on the package. Check out http://www.gromacs.org. + */ +#include "gmxpre.h" + +#include "gromacs/applied_forces/awh/biassharing.h" + +#include +#include +#include + +#include "gromacs/mdtypes/commrec.h" +#include "thread_mpi/tmpi.h" + +#include "gromacs/applied_forces/awh/tests/awh_setup.h" +#include "testutils/testasserts.h" + +namespace gmx +{ + +namespace test +{ + +// This test requires thread-MPI +#if GMX_THREAD_MPI + +namespace +{ + +//! The number of thread-MPI ranks to run this test on +const int c_numRanks = 4; + +//! The number of simulations sharing the same bias +const int c_numSharingBiases = 2; + +/*! \brief The actual test body, executed by each MPI rank + * + * Sets ups sharing and sums over sahring simulations. + */ +void parallelTestFunction(const void gmx_unused* dummy) +{ + int numRanks; + MPI_Comm_size(MPI_COMM_WORLD, &numRanks); + GMX_RELEASE_ASSERT(numRanks == c_numRanks, "Expect c_numRanks thread-MPI ranks"); + + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD, &myRank); + const int shareGroup = 1 + (myRank / c_numSharingBiases); + + t_commrec commRecord; + commRecord.nnodes = 1; + commRecord.nodeid = 0; + + const std::vector serializedAwhParametersPerDim = awhDimParamSerialized(); + auto awhDimArrayRef = gmx::arrayRefFromArray(&serializedAwhParametersPerDim, 1); + AwhTestParameters params = getAwhTestParameters(AwhHistogramGrowthType::ExponentialLinear, + AwhPotentialType::Convolved, + awhDimArrayRef, + false, + 0.4, + false, + 0.5, + 0, + shareGroup); + + BiasSharing biasSharing(params.awhParams, commRecord, MPI_COMM_WORLD); + + EXPECT_EQ(biasSharing.numSharingSimulations(0), c_numSharingBiases); + EXPECT_EQ(biasSharing.sharingSimulationIndex(0), myRank % c_numSharingBiases); + + const std::array input{ 1, 2, 4, 8 }; + std::array buffer{ input[myRank] }; + + biasSharing.sumOverMasterRanks(buffer, 0); + int expectedSum = 0; + for (int i = 0; i < c_numSharingBiases; i++) + { + expectedSum += input[(myRank / c_numSharingBiases) * c_numSharingBiases + i]; + } + EXPECT_EQ(buffer[0], expectedSum); +} + +} // namespace + +TEST(BiasSharingTest, SharingWorks) +{ + if (tMPI_Init_fn(TRUE, c_numRanks, TMPI_AFFINITY_NONE, parallelTestFunction, static_cast(this)) + != TMPI_SUCCESS) + { + GMX_THROW(gmx::InternalError("Failed to spawn thread-MPI threads")); + } +} + +#endif + +} // namespace test +} // namespace gmx diff --git a/src/gromacs/applied_forces/awh/tests/biasstate.cpp b/src/gromacs/applied_forces/awh/tests/biasstate.cpp index 66a19245d4..a22ac6413f 100644 --- a/src/gromacs/applied_forces/awh/tests/biasstate.cpp +++ b/src/gromacs/applied_forces/awh/tests/biasstate.cpp @@ -102,7 +102,7 @@ public: BiasGrid grid(dimParams, awhBiasParams.dimParams()); BiasParams biasParams( awhParams, awhBiasParams, dimParams, 1.0, 1.0, BiasParams::DisableUpdateSkips::no, 1, grid.axis(), 0); - biasState_ = std::make_unique(awhBiasParams, 1.0, dimParams, grid); + biasState_ = std::make_unique(awhBiasParams, 1.0, dimParams, grid, nullptr); // Here we initialize the grid point state using the input file std::string filename = gmx::test::TestFileManager::getInputFilePath(GetParam());
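
To illustrate the more flexible sharing described in the release note and the mdp-options change above, a minimal sketch of an ensemble setup follows. The directory names, group values, and number of simulations are illustrative only, and the other mandatory AWH and pull settings (awh1-ndim, the pull coordinate definitions, and so on) are omitted.

    ; sim1/grompp.mdp and sim2/grompp.mdp
    awh                 = yes
    awh-nbias           = 2
    awh-share-multisim  = yes
    awh1-share-group    = 0    ; first bias stays local to each simulation
    awh2-share-group    = 1    ; second bias shared with other simulations using group 1

    ; sim3/grompp.mdp and sim4/grompp.mdp: identical except for
    awh2-share-group    = 2    ; forms a second, independent sharing subset

Launched with gmx mdrun -multidir sim1 sim2 sim3 sim4, the second bias is then shared within sim1/sim2 and, independently, within sim3/sim4, while the first bias remains per-simulation. Within each simulation the positive share values increase with the bias index (or are 0), as the updated mdp documentation requires.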