/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Declares implementation functions and types for the domain
 * decomposition module.
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_domdec
 */
#ifndef GMX_DOMDEC_DOMDEC_INTERNAL_H
#define GMX_DOMDEC_DOMDEC_INTERNAL_H
48 #include "gromacs/domdec/domdec.h"
49 #include "gromacs/domdec/domdec_struct.h"
50 #include "gromacs/timing/cyclecounter.h"
51 #include "gromacs/topology/block.h"
struct gmx_domdec_ind_t
{
    /* The numbers of charge groups to send and receive for each cell
     * that requires communication; the last entry contains the total
     * number of atoms that need to be communicated.
     */
    int              nsend[DD_MAXIZONE+2];
    int              nrecv[DD_MAXIZONE+2];
    /* The charge groups to send */
    std::vector<int> index;
    /* The atom ranges for non-in-place communication */
    int              cell2at0[DD_MAXIZONE];
    int              cell2at1[DD_MAXIZONE];
};
struct gmx_domdec_comm_dim_t
{
    /* Returns the number of grid pulses (the number of domains in the halo along this dimension) */
    int numPulses() const
    {
        return ind.size();
    }

    int                           np_dlb;         /* For DLB, for use with edlbAUTO */
    std::vector<gmx_domdec_ind_t> ind;            /* The indices to communicate, one entry per grid pulse */
    bool                          receiveInPlace; /* Can we receive data in place? */
};
/*! \brief Load balancing data along a dim used on the master rank of that dim */
struct RowMaster
{
    struct Bounds
    {
        real cellFracLowerMax; /**< State var.: max lower bound., incl. neighbors */
        real cellFracUpperMin; /**< State var.: min upper bound., incl. neighbors */
        real boundMin;         /**< Temp. var.: lower limit for cell boundary */
        real boundMax;         /**< Temp. var.: upper limit for cell boundary */
    };

    std::vector<bool>   isCellMin;    /**< Temp. var.: is this cell size at the limit? */
    std::vector<real>   cellFrac;     /**< State var.: cell boundaries, box relative */
    std::vector<real>   oldCellFrac;  /**< Temp. var.: old cell size */
    std::vector<Bounds> bounds;       /**< Cell bounds */
    bool                dlbIsLimited; /**< State var.: is DLB limited in this row? */
    std::vector<real>   buf_ncd;      /**< Temp. var. */
};
/*! \brief Struct for managing cell sizes with DLB along a dimension */
struct DDCellsizesWithDlb
{
    /* Cell sizes for dynamic load balancing */
    std::unique_ptr<RowMaster> rowMaster;    /**< Cell row root struct, only available on the first rank in a row */
    std::vector<real>          fracRow;      /**< The cell sizes, in fractions, along a row, not available on the first rank in a row */
    real                       fracLower;    /**< The lower corner, in fractions, in triclinic space */
    real                       fracUpper;    /**< The upper corner, in fractions, in triclinic space */
    real                       fracLowerMax; /**< The maximum lower corner among all our neighbors */
    real                       fracUpperMin; /**< The minimum upper corner among all our neighbors */
};
/*! \brief Struct for communicating the compute load
 *
 * Here floats are accurate enough, since these variables
 * only influence the load balancing, not the actual MD results.
 */
struct domdec_load_t
{
    int    nload;    /**< The number of load recordings */
    float *load;     /**< Scan of the sum of load over dimensions */
    float  sum;      /**< The sum of the load over the ranks up to our current dimension */
    float  max;      /**< The maximum over the ranks contributing to \p sum */
    float  sum_m;    /**< Like \p sum, but takes the maximum when the load balancing is limited */
    float  cvol_min; /**< Minimum cell volume, relative to the box */
    float  mdf;      /**< The PP time during which PME can overlap */
    float  pme;      /**< The PME-only rank load */
    int    flags;    /**< Bit flags that tell if DLB was limited, per dimension */
};
/*! \brief Data needed to sort an atom to the desired location in the local state */
struct gmx_cgsort_t
{
    int nsc;    /**< Neighborsearch grid cell index */
    int ind_gl; /**< Global atom/charge group index */
    int ind;    /**< Local atom/charge group index */
};
/*! \brief Temporary buffers for sorting atoms */
struct gmx_domdec_sort_t
{
    std::vector<gmx_cgsort_t> sorted;     /**< Sorted array of indices */
    std::vector<gmx_cgsort_t> stationary; /**< Array of stationary atom/charge group indices */
    std::vector<gmx_cgsort_t> moved;      /**< Array of moved atom/charge group indices */
    std::vector<int>          intBuffer;  /**< Integer buffer for sorting */
};
/*! \brief Manages atom ranges and order for the local state atom vectors */
class DDAtomRanges
{
    public:
        /*! \brief The local state atom order
         *
         * This enum determines the order of the atoms in the local state.
         * Type::Home and Type::Zones should be first and second,
         * the others can be ordered as wanted.
         */
        enum class Type : int
        {
            Home,        /**< The home atoms */
            Zones,       /**< All zones in the eighth shell */
            Vsites,      /**< Atoms communicated for virtual sites */
            Constraints, /**< Atoms communicated for constraints */
            Number       /**< Not a count, only present for convenience */
        };

        /*! \brief Returns the start atom index for range \p rangeType */
        int start(Type rangeType) const
        {
            if (rangeType == Type::Home)
            {
                return 0;
            }
            return end_[static_cast<int>(rangeType) - 1];
        }

        /*! \brief Returns the end atom index for range \p rangeType */
        int end(Type rangeType) const
        {
            return end_[static_cast<int>(rangeType)];
        }

        /*! \brief Returns the number of home atoms */
        int numHomeAtoms() const
        {
            return end_[static_cast<int>(Type::Home)];
        }

        /*! \brief Returns the total number of atoms */
        int numAtomsTotal() const
        {
            return end_[static_cast<int>(Type::Number) - 1];
        }

        /*! \brief Sets the end index of range \p rangeType to \p end
         *
         * This should be called either with Type::Home or with a type
         * that is larger than that passed in the previous call to setEnd.
         * A release assertion for these conditions is present.
         */
        void setEnd(Type rangeType,
                    int  end)
        {
            GMX_RELEASE_ASSERT(rangeType == Type::Home || rangeType > lastTypeSet_, "Can only set either home or a larger type than the last one");

            for (int i = static_cast<int>(rangeType); i < static_cast<int>(Type::Number); i++)
            {
                end_[i] = end;
            }

            lastTypeSet_ = rangeType;
        }

    private:
        /*! \brief The list of end atom indices */
        std::array<int, static_cast<int>(Type::Number)> end_;
        Type                                            lastTypeSet_ = Type::Number;
};
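/* A minimal usage sketch of DDAtomRanges, with illustrative atom counts that
 * are not taken from the actual code:
 * \code
 *   DDAtomRanges ranges;
 *   ranges.setEnd(DDAtomRanges::Type::Home,        1000); // home atoms: [0, 1000)
 *   ranges.setEnd(DDAtomRanges::Type::Zones,       1600); // halo zone atoms: [1000, 1600)
 *   ranges.setEnd(DDAtomRanges::Type::Vsites,      1600); // no extra vsite atoms
 *   ranges.setEnd(DDAtomRanges::Type::Constraints, 1650); // 50 constraint atoms
 *
 *   int zonesStart = ranges.start(DDAtomRanges::Type::Zones); // == 1000
 *   int numTotal   = ranges.numAtomsTotal();                  // == 1650
 * \endcode
 */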
/*! \brief Enum of dynamic load balancing states
 *
 * Allowed DLB states and transitions
 * - initialization at startup:
 *                          -> offUser ("-dlb no")
 *                          -> onUser  ("-dlb yes")
 *                          -> offCanTurnOn ("-dlb auto")
 *
 * - in automatic mode (i.e. initial state offCanTurnOn):
 *   offCanTurnOn         -> onCanTurnOff
 *   offCanTurnOn         -> offForever
 *   offCanTurnOn         -> offTemporarilyLocked
 *   offTemporarilyLocked -> offCanTurnOn
 *   onCanTurnOff         -> offCanTurnOn
 */
enum class DlbState
{
    offUser,              /**< DLB is permanently off per user request */
    offForever,           /**< DLB is off due to a runtime condition (not supported or causes performance loss) and will never be turned on */
    offCanTurnOn,         /**< DLB is off and will turn on when imbalance is detected */
    offTemporarilyLocked, /**< DLB is off and temporarily can't turn on */
    onCanTurnOff,         /**< DLB is on and can turn off when slow */
    onUser,               /**< DLB is permanently on per user request */
    nr                    /**< The number of DLB states */
};
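/* A sketch (hypothetical helper, not part of this header) of the automatic-mode
 * transitions listed above:
 * \code
 *   static bool transitionAllowed(DlbState from, DlbState to)
 *   {
 *       switch (from)
 *       {
 *           case DlbState::offCanTurnOn:
 *               return (to == DlbState::onCanTurnOff ||
 *                       to == DlbState::offForever ||
 *                       to == DlbState::offTemporarilyLocked);
 *           case DlbState::offTemporarilyLocked:
 *           case DlbState::onCanTurnOff:
 *               return to == DlbState::offCanTurnOn;
 *           default:
 *               return false; // offUser, onUser and offForever never change
 *       }
 *   }
 * \endcode
 */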
/*! \brief The PME domain decomposition for one dimension */
struct gmx_ddpme_t
{
    int      dim;       /**< The dimension */
    gmx_bool dim_match; /**< Tells if DD and PME dims match */
    int      nslab;     /**< The number of PME ranks/domains in this dimension */
    real    *slb_dim_f; /**< Cell sizes for determining the PME comm. with SLB */
    int     *pp_min;    /**< The minimum pp node location, size nslab */
    int     *pp_max;    /**< The maximum pp node location, size nslab */
    int      maxshift;  /**< The maximum shift for coordinate redistribution in PME */
};
/*! \brief Zone limits for a DD dimension with staggered grids */
struct gmx_ddzone_t
{
    real min0; /* The minimum bottom of this zone                       */
    real max1; /* The maximum top of this zone                          */
    real min1; /* The minimum top of this zone                          */
    real mch0; /* The maximum bottom communication height for this zone */
    real mch1; /* The maximum top communication height for this zone    */
    real p1_0; /* The bottom value of the first cell in this zone       */
    real p1_1; /* The top value of the first cell in this zone          */
};

/*! \brief The number of reals in gmx_ddzone_t, which must match the number of real members above */
constexpr int c_ddzoneNumReals = 7;
/*! \brief Forward declaration */
template<typename T> class DDBufferAccess;
/*! \brief Temporary storage container that minimizes (re)allocation and clearing
 *
 * This is only the storage; actual access happens through DDBufferAccess.
 * All methods check whether the buffer is (not) in use.
 */
template<typename T>
class DDBuffer
{
    private:
        /*! \brief Returns a buffer of size \p numElements, the elements are undefined */
        gmx::ArrayRef<T> resize(size_t numElements)
        {
            GMX_ASSERT(isInUse_, "Should only operate on acquired buffers");

            if (numElements > buffer_.size())
            {
                buffer_.resize(numElements);
            }

            return gmx::arrayRefFromArray(buffer_.data(), numElements);
        }

        /*! \brief Acquires the buffer for use with size set to \p numElements, the elements are undefined */
        gmx::ArrayRef<T> acquire(size_t numElements)
        {
            GMX_RELEASE_ASSERT(!isInUse_, "Should only request free buffers");
            isInUse_ = true;

            return resize(numElements);
        }

        /*! \brief Releases the buffer; the buffer should not be used after this call */
        void release()
        {
            GMX_RELEASE_ASSERT(isInUse_, "Should only release buffers in use");
            isInUse_ = false;
        }

        std::vector<T> buffer_;          /**< The actual memory buffer */
        bool           isInUse_ = false; /**< Flag that tells whether the buffer is in use */

        friend class DDBufferAccess<T>;
};
/*! \brief Class that manages access to a temporary memory buffer */
template<typename T>
class DDBufferAccess
{
    public:
        /*! \brief Constructor, returns a buffer of size \p numElements, element values are undefined
         *
         * \note The actual memory buffer \p ddBuffer can not be used to
         *       create other DDBufferAccess objects until the one created
         *       here is destroyed.
         */
        DDBufferAccess(DDBuffer<T> &ddBuffer,
                       size_t       numElements) :
            ddBuffer_(ddBuffer)
        {
            buffer = ddBuffer_.acquire(numElements);
        }

        /*! \brief Destructor, releases the buffer */
        ~DDBufferAccess()
        {
            ddBuffer_.release();
        }

        /*! \brief Resizes the buffer to \p numElements, new elements are undefined
         *
         * \note The buffer arrayref is updated after this call.
         */
        void resize(size_t numElements)
        {
            buffer = ddBuffer_.resize(numElements);
        }

    private:
        DDBuffer<T>     &ddBuffer_; /**< Reference to the storage class */
    public:
        gmx::ArrayRef<T> buffer;    /**< The access to the memory buffer */
};
/*! \brief Temporary buffer for setting up communication over one pulse and all zones in the halo */
struct dd_comm_setup_work_t
{
    std::vector<int>       localAtomGroupBuffer; /**< The local atom group indices to send */
    std::vector<int>       atomGroupBuffer;      /**< Buffer for collecting the global atom group indices to send */
    std::vector<gmx::RVec> positionBuffer;       /**< Buffer for collecting the atom group positions to send */
    int                    nat;                  /**< The number of atoms contained in the atom groups to send */
    int                    nsend_zone;           /**< The number of atom groups to send for the last zone */
};
/*! \brief Struct for domain decomposition communication
 *
 * This struct contains most information about domain decomposition
 * communication setup, some communication buffers, some statistics
 * and also the setup for the communication between particle-particle
 * and PME-only ranks.
 *
 * All arrays are indexed with 0 to dd->ndim (not Cartesian indexing),
 * unless stated otherwise.
 */
struct gmx_domdec_comm_t // NOLINT (clang-analyzer-optin.performance.Padding)
{
    /* PME and Cartesian communicator stuff */
    int         npmedecompdim;     /**< The number of decomposition dimensions for PME, 0: no PME */
    int         npmenodes;         /**< The number of ranks doing PME (PP/PME or only PME) */
    int         npmenodes_x;       /**< The number of PME ranks/domains along x */
    int         npmenodes_y;       /**< The number of PME ranks/domains along y */
    gmx_bool    bCartesianPP_PME;  /**< Use Cartesian communication between PP and PME ranks */
    ivec        ntot;              /**< Cartesian grid for combined PP+PME ranks */
    int         cartpmedim;        /**< The number of dimensions for the PME setup that are Cartesian */
    int        *pmenodes;          /**< The PME ranks, size npmenodes */
    int        *ddindex2simnodeid; /**< The Cartesian index to sim rank conversion, used with bCartesianPP_PME */
    gmx_ddpme_t ddpme[2];          /**< The 1D or 2D PME domain decomposition setup */
    /* The DD particle-particle nodes only */
    gmx_bool bCartesianPP;     /**< Use a Cartesian communicator for PP */
    int     *ddindex2ddnodeid; /**< The Cartesian index to DD rank conversion, used with bCartesianPP */

    /* The DD state, used for reloading old states, during e.g. EM */
    t_block cgs_gl; /**< The global charge groups, this defines the DD state (except for the DLB state) */

    /* Charge group / atom sorting */
    std::unique_ptr<gmx_domdec_sort_t> sort; /**< Data structure for cg/atom sorting */

    /* Are there charge groups? */
    gmx_bool bCGs; /**< True when there are charge groups */

    gmx_bool bInterCGBondeds;   /**< Are there inter-cg bonded interactions? */
    gmx_bool bInterCGMultiBody; /**< Are there inter-cg multi-body interactions? */

    /* Data for the optional bonded interaction atom communication range */
    gmx_bool  bBondComm; /**< Only communicate atoms beyond the non-bonded cut-off when they are involved in bonded interactions with non-local atoms */
    t_blocka *cglink;    /**< Links between cg's through bonded interactions */
    char     *bLocalCG;  /**< Local cg availability, TODO: remove when group scheme is removed */

    /* The DLB state, possible values are defined above */
    DlbState dlbState;
    /* With dlbState=DlbState::offCanTurnOn, should we check whether to turn DLB on at the next DD partitioning? */
    gmx_bool bCheckWhetherToTurnDlbOn;
    /* The first DD count since we are running without DLB */
    int      ddPartioningCountFirstDlbOff = 0;
    /* Cell sizes for static load balancing, first index Cartesian */
    real **slb_frac;

    /* The width of the communicated boundaries */
    real     cutoff_mbody;     /**< Cut-off for multi-body interactions, also 2-body bonded when \p cutoff_mbody > \p cutoff */
    real     cutoff;           /**< Cut-off for non-bonded/2-body interactions */
    rvec     cellsize_min;     /**< The minimum guaranteed cell-size, Cartesian indexing */
    rvec     cellsize_min_dlb; /**< The minimum guaranteed cell-size with dlb=auto */
    real     cellsize_limit;   /**< The lower limit for the DD cell size with DLB */
    gmx_bool bVacDLBNoLimit;   /**< Effectively no NB cut-off limit with DLB for systems without PBC? */

    /** With PME load balancing we set limits on DLB */
    gmx_bool bPMELoadBalDLBLimits;
    /** DLB needs to take into account that we want to allow this maximum
     * cut-off (for PME load balancing), this could limit cell boundaries.
     */
    real PMELoadBal_max_cutoff;
    ivec tric_dir; /**< tric_dir from \p gmx_domdec_box_t is only stored here because dd_get_ns_ranges needs it */
    rvec box0;     /**< Box lower corner, required with dims without pbc and -gcom */
    rvec box_size; /**< Box size, required with dims without pbc and -gcom */

    rvec cell_x0; /**< The DD cell lower corner, in triclinic space */
    rvec cell_x1; /**< The DD cell upper corner, in triclinic space */

    rvec old_cell_x0; /**< The old \p cell_x0, to check cg displacements */
    rvec old_cell_x1; /**< The old \p cell_x1, to check cg displacements */

    /** The communication setup and charge group boundaries for the zones */
    gmx_domdec_zones_t zones;

    /* The zone limits for DD dimensions 1 and 2 (not 0), determined from
     * the cell boundaries of neighboring cells for staggered grids when using
     * dynamic load balancing.
     */
    gmx_ddzone_t zone_d1[2];    /**< Zone limits for dim 1 with staggered grids */
    gmx_ddzone_t zone_d2[2][2]; /**< Zone limits for dim 2 with staggered grids */

    /** The coordinate/force communication setup and indices */
    gmx_domdec_comm_dim_t cd[DIM];
    /** The maximum number of cells to communicate with in one dimension */
    int maxpulse;

    /** Which cg distribution is stored on the master node,
     *  stored as DD partitioning call count.
     */
    int64_t master_cg_ddp_count;

    /** The number of cg's received from the direct neighbors */
    int zone_ncg1[DD_MAXZONE];

    /** The atom ranges in the local state */
    DDAtomRanges atomRanges;

    /** Array for signalling if atoms have moved to another domain */
    std::vector<int> movedBuffer;

    /** Communication int buffer for general use */
    DDBuffer<int> intBuffer;

    /** Communication rvec buffer for general use */
    DDBuffer<gmx::RVec> rvecBuffer;
    /* Temporary storage for thread parallel communication setup */
    std::vector<dd_comm_setup_work_t> dth; /**< Thread-local work data */

    /* Communication buffer only used with multiple grid pulses */
    DDBuffer<gmx::RVec> rvecBuffer2; /**< Another rvec comm. buffer */

    /* Communication buffers for local redistribution */
    std::array<std::vector<int>, DIM*2>       cggl_flag;  /**< Charge group flag comm. buffers */
    std::array<std::vector<gmx::RVec>, DIM*2> cgcm_state; /**< Charge group center comm. buffers */

    /* Cell sizes for dynamic load balancing */
    std::vector<DDCellsizesWithDlb> cellsizesWithDlb;

    /* Stuff for load communication */
    gmx_bool       bRecordLoad;         /**< Should we record the load? */
    domdec_load_t *load;                /**< The recorded load data */
    int            nrank_gpu_shared;    /**< The number of MPI ranks sharing the GPU our rank is using */
    MPI_Comm      *mpi_comm_load;       /**< The MPI load communicator */
    MPI_Comm       mpi_comm_gpu_shared; /**< The MPI load communicator for ranks sharing a GPU */
    /* Information for managing the dynamic load balancing */
    int dlb_scale_lim; /**< Maximum DLB scaling per load balancing step in percent */

    BalanceRegion *balanceRegion; /**< Struct for timing the force load balancing region */

    /* Cycle counters over nstlist steps */
    float cycl[ddCyclNr];     /**< Total cycles counted */
    int   cycl_n[ddCyclNr];   /**< The number of cycle recordings */
    float cycl_max[ddCyclNr]; /**< The maximum cycle count */

    /** Flop counter (0=no, 1=yes, 2=with (eFlop-1)*5% noise) */
    int    eFlop;
    double flop;   /**< Total flops counted */
    int    flop_n; /**< The number of flop recordings */
    /** How many times did we have load measurements */
    int n_load_have;
    /** How many times have we collected the load measurements */
    int n_load_collect;

    /* Cycle count history for DLB checks */
    float   cyclesPerStepBeforeDLB;     /**< The averaged cycles per step over the last nstlist steps before turning on DLB */
    float   cyclesPerStepDlbExpAverage; /**< The running average of the cycles per step during DLB */
    bool    haveTurnedOffDlb;           /**< Have we turned off DLB (after turning DLB on)? */
    int64_t dlbSlowerPartitioningCount; /**< The DD step at which we last measured that DLB off was faster than DLB on, 0 if there was no such step */
    /* Statistics for atoms */
    double sum_nat[static_cast<int>(DDAtomRanges::Type::Number)]; /**< The atoms per range, summed over the steps */

    /* Statistics for calls and times */
    int    ndecomp;   /**< The number of partitioning calls */
    int    nload;     /**< The number of load recordings */
    double load_step; /**< Total MD step time */
    double load_sum;  /**< Total PP force time */
    double load_max;  /**< Max \p load_sum over the ranks */
    ivec   load_lim;  /**< Was load balancing limited, per DD dim */
    double load_mdf;  /**< Total time on PP done during PME overlap time */
    double load_pme;  /**< Total time on our PME-only rank */

    /** The last partition step */
    int64_t partition_step;

    /* Debugging */
    int nstDDDump;     /**< Step interval for dumping the local+non-local atoms to pdb */
    int nstDDDumpGrid; /**< Step interval for dumping the DD grid to pdb */
    int DD_debug;      /**< DD debug print level: 0, 1, 2 */
};
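/* The exponential running average above is presumably updated along these
 * lines (assumed form for illustration only; the actual update and its weight
 * live in the implementation files):
 * \code
 *   comm->cyclesPerStepDlbExpAverage =
 *       0.9f*comm->cyclesPerStepDlbExpAverage + 0.1f*cyclesPerStep;
 * \endcode
 */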
/*! \brief DD zone permutation
 *
 * Zone permutation from the Cartesian x-major/z-minor order to an order
 * that leads to consecutive charge groups for neighbor searching.
 * TODO: remove when the group scheme is removed
 */
static const int zone_perm[3][4] = { {0, 0, 0, 0}, {1, 0, 0, 0}, {3, 0, 1, 2} };
/*! \brief DD zone reordering to Cartesian order
 *
 * Index to reorder the zones such that they end up in Cartesian order
 * with dimension index 0 major and dimension index 2 minor.
 */
static const int zone_reorder_cartesian[DD_MAXZONE] = { 0, 1, 3, 2, 5, 4, 6, 7 };

/* dd_zo and dd_zp3 are set up such that i zones with non-zero
 * components see only j zones with that component 0.
 */
/*! \brief Returns the DD cut-off distance for multi-body interactions */
real dd_cutoff_multibody(const gmx_domdec_t *dd);

/*! \brief Returns the DD cut-off distance for two-body interactions */
real dd_cutoff_twobody(const gmx_domdec_t *dd);
/*! \brief Returns the domain index given the number of domains and the domain coordinates
 *
 * This order is required to minimize the coordinate communication in PME
 * which uses decomposition in the x direction.
 */
static inline int dd_index(const ivec numDomains,
                           const ivec domainCoordinates)
{
    return ((domainCoordinates[XX]*numDomains[YY] + domainCoordinates[YY])*numDomains[ZZ]) + domainCoordinates[ZZ];
}
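/* For example (illustrative values), with numDomains = {4, 3, 2} and
 * domainCoordinates = {1, 2, 0} the function returns (1*3 + 2)*2 + 0 = 10,
 * i.e. x is the major index and z the minor one.
 */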
/*! \brief Returns the size of the buffer to hold fractional cell boundaries for DD dimension index \p dimIndex */
static inline int ddCellFractionBufferSize(const gmx_domdec_t *dd,
                                           int                 dimIndex)
{
    return dd->nc[dd->dim[dimIndex]] + 1 + dimIndex*2 + 1 + dimIndex;
}
/*! \brief Maximum number of ranks for using send/recv for state scattering and gathering
 *
 * Use separate MPI send and receive commands
 * when #ranks <= c_maxNumRanksUseSendRecvForScatterAndGather.
 * This saves memory (and some copying for small #ranks).
 * For high parallelization scatter and gather calls are used instead.
 */
static constexpr int c_maxNumRanksUseSendRecvForScatterAndGather = 4;
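/* A sketch of how this threshold is presumably applied (assumed shape with
 * hypothetical variable names; the actual branch lives in the implementation
 * files):
 * \code
 *   if (dd->nnodes <= c_maxNumRanksUseSendRecvForScatterAndGather)
 *   {
 *       // pairwise MPI_Send/MPI_Recv between the master and each rank
 *   }
 *   else
 *   {
 *       // collective MPI_Scatterv/MPI_Gatherv over all ranks
 *   }
 * \endcode
 */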
/*! \brief Make DD cells larger by this factor than the limit to avoid rounding issues */
static constexpr double DD_CELL_MARGIN = 1.0001;

/*! \brief Factor for checking DD cell size limitation during DLB, should be in between 1 and DD_CELL_MARGIN */
static constexpr double DD_CELL_MARGIN2 = 1.00005;

/*! \brief With pressure scaling, keep cell sizes 2% above the limit to allow for some scaling */
static constexpr double DD_PRES_SCALE_MARGIN = 1.02;

#endif