/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2005 - 2014, The GROMACS development team.
 * Copyright (c) 2015,2016,2017,2018,2019 by the GROMACS development team.
 * Copyright (c) 2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \defgroup module_domdec Spatial domain decomposition (for parallelization over MPI)
 * \ingroup group_mdrun
 *
 * \brief Manages the decomposition of the simulation volume over MPI
 * ranks to try to distribute work evenly with minimal communication
 * overheads.
 *
 * \todo Get domdec stuff out of mdtypes/commrec.h
 *
 * \author Berk Hess <hess@kth.se>
 */

/*! \libinternal \file
 *
 * \brief This file declares functions for mdrun to call to manage the
 * details of its domain decomposition.
 *
 * \author Berk Hess <hess@kth.se>
 * \inlibraryapi
 * \ingroup module_domdec
 */

#ifndef GMX_DOMDEC_DOMDEC_H
#define GMX_DOMDEC_DOMDEC_H

#include <vector>

#include "gromacs/gpu_utils/devicebuffer_datatype.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/utility/real.h"

struct gmx_domdec_t;
struct gmx_domdec_zones_t;
struct gmx_localtop_t;
struct gmx_mtop_t;
struct t_commrec;
struct gmx_wallcycle;
enum class PbcType : int;
class t_state;
class GpuEventSynchronizer;

namespace gmx
{
struct AtomInfoWithinMoleculeBlock;
class DeviceStreamManager;
class ForceWithShiftForces;
class MDLogger;
class RangePartitioning;
class VirtualSitesHandler;
template<typename>
class ArrayRef;
template<typename, size_t>
class FixedCapacityVector;
} // namespace gmx

/*! \brief Returns the global topology atom number belonging to local atom index i.
 *
 * This function is intended for writing ASCII output
 * and returns atom numbers starting at 1.
 * When dd=NULL returns i+1.
 */
int ddglatnr(const gmx_domdec_t* dd, int i);
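
/* Example (illustrative sketch, not part of this header's API): printing the
 * global, 1-based topology numbers of the home atoms for ASCII output. The
 * file pointer fp and the DD pointer dd are assumed to be provided by the
 * caller.
 *
 *   for (int i = 0; i < dd_numHomeAtoms(*dd); i++)
 *   {
 *       fprintf(fp, "local %d -> global %d\n", i, ddglatnr(dd, i));
 *   }
 */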

/*! \brief Store the global cg indices of the home cgs in state.
 *
 * This means it can be reset, even after a new DD partitioning.
 */
void dd_store_state(const gmx_domdec_t& dd, t_state* state);

/*! \brief Returns a pointer to the gmx_domdec_zones_t struct */
struct gmx_domdec_zones_t* domdec_zones(struct gmx_domdec_t* dd);

/*! \brief Returns the range for atoms in zones */
int dd_numAtomsZones(const gmx_domdec_t& dd);

/*! \brief Returns the number of home atoms */
int dd_numHomeAtoms(const gmx_domdec_t& dd);

/*! \brief Returns the atom range in the local state for atoms that need to be present in mdatoms */
int dd_natoms_mdatoms(const gmx_domdec_t& dd);

/*! \brief Returns the atom range in the local state for atoms involved in virtual sites */
int dd_natoms_vsite(const gmx_domdec_t& dd);

/*! \brief Returns, via \p at_start and \p at_end, the atom range in the local state for atoms received in constraints communication */
void dd_get_constraint_range(const gmx_domdec_t& dd, int* at_start, int* at_end);

/*! \libinternal \brief Struct for passing around the number of PME domains */
struct NumPmeDomains
{
    int x; //!< The number of PME domains along dimension x
    int y; //!< The number of PME domains along dimension y
};

/*! \brief Returns the number of PME domains, can be called with dd=NULL */
NumPmeDomains getNumPmeDomains(const gmx_domdec_t* dd);
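
/* Example (illustrative sketch): querying the PME domain counts, e.g. for
 * reporting; as noted above, this can also be called with dd == NULL.
 *
 *   const NumPmeDomains numPmeDomains = getNumPmeDomains(dd);
 *   fprintf(stderr, "PME decomposition: %d x %d\n", numPmeDomains.x, numPmeDomains.y);
 */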

/*! \brief Returns the set of DD ranks that communicate with pme node cr->nodeid */
std::vector<int> get_pme_ddranks(const t_commrec* cr, int pmenodeid);

/*! \brief Returns the maximum shift for coordinate communication in PME, dim x */
int dd_pme_maxshift_x(const gmx_domdec_t& dd);

/*! \brief Returns the maximum shift for coordinate communication in PME, dim y */
int dd_pme_maxshift_y(const gmx_domdec_t& dd);

/*! \brief Returns whether update groups are used */
bool ddUsesUpdateGroups(const gmx_domdec_t& dd);

/*! \brief Returns whether molecules are always whole, i.e. not broken by PBC */
bool dd_moleculesAreAlwaysWhole(const gmx_domdec_t& dd);

/*! \brief Returns whether we need to do PBC for calculating bonded interactions */
bool dd_bonded_molpbc(const gmx_domdec_t& dd, PbcType pbcType);

/*! \brief Change the DD non-bonded communication cut-off.
 *
 * This can fail when trying to increase the cut-off; in that case FALSE
 * is returned and the cut-off is not modified.
 *
 * \param[in] cr               Communication record
 * \param[in] box              Box matrix, used for computing the dimensions of the system
 * \param[in] x                Position vector, used for computing the dimensions of the system
 * \param[in] cutoffRequested  The requested atom-to-atom cut-off distance, usually the pair-list cut-off distance
 */
bool change_dd_cutoff(t_commrec* cr, const matrix box, gmx::ArrayRef<const gmx::RVec> x, real cutoffRequested);
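
/* Example (illustrative sketch, e.g. from pair-list tuning code; cr, box,
 * coordinates and newPairlistCutoff are assumptions of this sketch):
 *
 *   if (!change_dd_cutoff(cr, box, coordinates, newPairlistCutoff))
 *   {
 *       // The DD grid cannot accommodate the larger cut-off; keep the old one.
 *   }
 */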

/*! \brief Set up communication for averaging GPU wait times over domains
 *
 * When domains (PP MPI ranks) share a GPU, the individual GPU wait times
 * are meaningless, as they depend on the order in which tasks on the same
 * GPU finish. Therefore these wait times need to be averaged over the ranks
 * sharing the same GPU. This function sets up the communication for that.
 */
void dd_setup_dlb_resource_sharing(const t_commrec* cr, int gpu_id);

/*! \brief Cycle counter indices used internally in the domain decomposition */
enum { ddCyclStep, ddCyclPPduringPME, ddCyclF, ddCyclWaitGPU, ddCyclPME, ddCyclNr };

/*! \brief Add the wallcycle count to the DD counter */
void dd_cycles_add(const gmx_domdec_t* dd, float cycles, int ddCycl);
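
/* Example (illustrative sketch): crediting cycles spent in force work to the
 * DD load-balancing counters; cyclesForce is an assumed caller-side count.
 *
 *   dd_cycles_add(dd, cyclesForce, ddCyclF);
 */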

/*! \brief Communicate the coordinates to the neighboring cells and do pbc. */
void dd_move_x(struct gmx_domdec_t* dd, const matrix box, gmx::ArrayRef<gmx::RVec> x, gmx_wallcycle* wcycle);

/*! \brief Sum the forces over the neighboring cells.
 *
 * When fshift!=NULL the shift forces are updated to obtain
 * the correct virial from the single sum including f.
 */
void dd_move_f(struct gmx_domdec_t* dd, gmx::ForceWithShiftForces* forceWithShiftForces, gmx_wallcycle* wcycle);
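
/* Example (illustrative sketch of the usual halo-exchange pattern in a force
 * calculation; dd, box, coordinates, forceWithShiftForces and wcycle are
 * assumed to be provided by the caller):
 *
 *   dd_move_x(dd, box, coordinates, wcycle);     // fill non-local coordinates
 *   // ... compute forces on local + non-local atoms ...
 *   dd_move_f(dd, forceWithShiftForces, wcycle); // reduce forces back to home ranks
 */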

/*! \brief Communicate a real for each atom to the neighboring cells. */
void dd_atom_spread_real(struct gmx_domdec_t* dd, real v[]);

/*! \brief Sum the contributions to a real for each atom over the neighboring cells. */
void dd_atom_sum_real(struct gmx_domdec_t* dd, real v[]);

/*! \brief Reset all the statistics and counters for total run counting */
void reset_dd_statistics_counters(struct gmx_domdec_t* dd);

/* In domdec_con.c */

/*! \brief Communicates the virtual site forces, reduces the shift forces when \p fshift != NULL */
void dd_move_f_vsites(const gmx_domdec_t& dd, gmx::ArrayRef<gmx::RVec> f, gmx::ArrayRef<gmx::RVec> fshift);

/*! \brief Clears the forces for virtual sites */
void dd_clear_f_vsites(const gmx_domdec_t& dd, gmx::ArrayRef<gmx::RVec> f);

/*! \brief Move x0 and also x1 if x1!=NULL. bX1IsCoord tells if to do PBC on x1 */
void dd_move_x_constraints(struct gmx_domdec_t*     dd,
                           const matrix             box,
                           gmx::ArrayRef<gmx::RVec> x0,
                           gmx::ArrayRef<gmx::RVec> x1,
                           bool                     bX1IsCoord);

/*! \brief Communicates the coordinates involved in virtual sites */
void dd_move_x_vsites(const gmx_domdec_t& dd, const matrix box, rvec* x);

/*! \brief Communicates the positions and velocities involved in virtual sites */
void dd_move_x_and_v_vsites(const gmx_domdec_t& dd, const matrix box, rvec* x, rvec* v);

/*! \brief Returns the local atom count array for all constraints
 *
 * The local atom count for a constraint, possible values 2/1/0, is needed
 * to avoid not/double-counting contributions linked to the Lagrange
 * multiplier, such as the virial and free-energy derivatives.
 *
 * \note When \p dd = nullptr, an empty reference is returned.
 */
gmx::ArrayRef<const int> dd_constraints_nlocalatoms(const gmx_domdec_t* dd);
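
/* Example (illustrative sketch): weighting a per-constraint contribution so
 * that it is counted exactly once when summed over ranks; con is an assumed
 * constraint index and contribution an assumed per-constraint term.
 *
 *   gmx::ArrayRef<const int> nLocalAtoms = dd_constraints_nlocalatoms(dd);
 *   // A constraint couples 2 atoms; scale by the fraction that is local here.
 *   const real weight = nLocalAtoms.empty() ? 1 : 0.5 * nLocalAtoms[con];
 *   virialContribution += weight * contribution;
 */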

/*! \brief Construct local state */
void dd_init_local_state(const gmx_domdec_t& dd, const t_state* state_global, t_state* local_state);

/*! \brief Construct the GPU halo exchange object(s).
 *
 * \param[in] mdlog               The logger object.
 * \param[in] cr                  The commrec object.
 * \param[in] deviceStreamManager Manager of the GPU context and streams.
 * \param[in] wcycle              The wallclock counter.
 */
void constructGpuHaloExchange(const gmx::MDLogger&            mdlog,
                              const t_commrec&                cr,
                              const gmx::DeviceStreamManager& deviceStreamManager,
                              gmx_wallcycle*                  wcycle);

/*! \brief
 * (Re-) Initialization for GPU halo exchange
 * \param [in] cr                  The commrec object
 * \param [in] d_coordinatesBuffer pointer to coordinates buffer in GPU memory
 * \param [in] d_forcesBuffer      pointer to forces buffer in GPU memory
 */
void reinitGpuHaloExchange(const t_commrec&        cr,
                           DeviceBuffer<gmx::RVec> d_coordinatesBuffer,
                           DeviceBuffer<gmx::RVec> d_forcesBuffer);

/*! \brief GPU halo exchange of coordinates buffer.
 * \param [in] cr              The commrec object
 * \param [in] box             Coordinate box (from which shifts will be constructed)
 * \param [in] dependencyEvent Dependency event for this operation
 * \returns                    Event recorded when this operation has been launched
 */
GpuEventSynchronizer* communicateGpuHaloCoordinates(const t_commrec&      cr,
                                                    const matrix          box,
                                                    GpuEventSynchronizer* dependencyEvent);

/*! \brief GPU halo exchange of the force buffer.
 * \param [in] cr               The commrec object
 * \param [in] accumulateForces True if forces should accumulate, otherwise they are set
 * \param [in] dependencyEvents Dependency events for this operation
 */
void communicateGpuHaloForces(const t_commrec&                                    cr,
                              bool                                                accumulateForces,
                              gmx::FixedCapacityVector<GpuEventSynchronizer*, 2>* dependencyEvents);
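
/* Example (illustrative sketch of the call sequence on a PP rank; cr, mdlog,
 * deviceStreamManager, wcycle, the device buffers, the box and the
 * synchronizer objects are assumed to be provided by the caller):
 *
 *   constructGpuHaloExchange(mdlog, cr, deviceStreamManager, wcycle);
 *   reinitGpuHaloExchange(cr, d_coordinates, d_forces); // after each repartitioning
 *   GpuEventSynchronizer* xReadyEvent =
 *           communicateGpuHaloCoordinates(cr, box, xDependencyEvent);
 *   // ... launch non-local force kernels that depend on xReadyEvent ...
 *   communicateGpuHaloForces(cr, accumulateForces, &fDependencyEvents);
 */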

/*! \brief Wraps the \c positions so that atoms from the same
 * update group share the same periodic image wrt \c box.
 *
 * When DD and update groups are in use, the simulation master rank
 * should call this to ensure that, e.g. when restarting a simulation
 * that did not use update groups, the coordinates satisfy the new
 * requirements.
 *
 * This function can probably be removed when even single-rank
 * simulations use domain decomposition, because then the choice of
 * whether update groups are used is probably going to be the same
 * regardless of the rank count.
 *
 * \param[in]    dd         The DD manager
 * \param[in]    mtop       The system topology
 * \param[in]    box        The global system box
 * \param[in]    positions  The global system positions
 */
void putUpdateGroupAtomsInSamePeriodicImage(const gmx_domdec_t&      dd,
                                            const gmx_mtop_t&        mtop,
                                            const matrix             box,
                                            gmx::ArrayRef<gmx::RVec> positions);

#endif