/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2008,2009,2010,2011,2012 by the GROMACS development team.
 * Copyright (c) 2013,2014,2015,2017,2018 by the GROMACS development team.
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file defines functions used by the domdec module
 * in its initial setup phase.
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_domdec
 */
49 #include "domdec_setup.h"
58 #include "gromacs/domdec/domdec.h"
59 #include "gromacs/domdec/domdec_struct.h"
60 #include "gromacs/domdec/options.h"
61 #include "gromacs/ewald/pme.h"
62 #include "gromacs/gmxlib/network.h"
63 #include "gromacs/math/units.h"
64 #include "gromacs/math/utilities.h"
65 #include "gromacs/math/vec.h"
66 #include "gromacs/mdlib/perf_est.h"
67 #include "gromacs/mdtypes/commrec.h"
68 #include "gromacs/mdtypes/inputrec.h"
69 #include "gromacs/mdtypes/md_enums.h"
70 #include "gromacs/pbcutil/pbc.h"
71 #include "gromacs/topology/topology.h"
72 #include "gromacs/utility/fatalerror.h"
73 #include "gromacs/utility/logger.h"
74 #include "gromacs/utility/mdmodulesnotifiers.h"
75 #include "gromacs/utility/stringutil.h"
78 #include "domdec_internal.h"
// TODO remove this when moving domdec into gmx namespace
using gmx::DomdecOptions;

/*! \brief Margin for setting up the DD grid */
#define DD_GRID_MARGIN_PRES_SCALE 1.05
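/* The 5% margin above is applied to the minimum cell size when pressure
 * coupling is active (see getDDGridSetupCellSizeLimit() below), so that the
 * grid chosen at setup keeps some slack if the box shrinks during the run. */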
/*! \brief Factorize \p n.
 *
 * \param[in]  n    Value to factorize
 * \param[out] fac  Vector of factors (to be allocated in this function)
 * \param[out] mfac Vector with the number of times each factor repeats in the factorization (to be allocated in this function)
 */
static void factorize(int n, std::vector<int>* fac, std::vector<int>* mfac)

        gmx_fatal(FARGS, "Can only factorize positive integers.");

    /* Decompose n into factors */

        if (fac->empty() || fac->back() != d)
/*! \brief Return the largest prime factor of \p n */
static int largest_divisor(int n)

    std::vector<int> div;
    std::vector<int> mdiv;
    factorize(n, &div, &mdiv);
/*! \brief Returns TRUE when there are enough PME ranks for the ratio */
static gmx_bool fits_pme_ratio(int nrank_tot, int nrank_pme, float ratio)

    return (static_cast<double>(nrank_pme) / static_cast<double>(nrank_tot) > 0.95 * ratio);

/*! \brief Returns TRUE when npme out of ntot ranks doing PME is expected to give reasonable performance */
static gmx_bool fits_pp_pme_perf(int ntot, int npme, float ratio)

    std::vector<int> div;
    std::vector<int> mdiv;
    factorize(ntot - npme, &div, &mdiv);

    int npp_root3  = gmx::roundToInt(std::cbrt(ntot - npme));
    int npme_root2 = gmx::roundToInt(std::sqrt(static_cast<double>(npme)));

    /* The check below gives a reasonable division:
     * factor 5 allowed at 5 or more PP ranks,
     * factor 7 allowed at 49 or more PP ranks.
     */
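    /* For example: with 5 PP ranks, cbrt(5) rounds to 2, so prime factors up
     * to 3 + 2 = 5 pass the check below; with 49 PP ranks, cbrt(49) rounds to
     * 4, so factors up to 7 pass. */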
    if (div.back() > 3 + npp_root3)

    /* Check whether the numbers of PP and PME ranks have a reasonably sized
     * common divisor, such that we can use 2D PME decomposition
     * when required (which requires nx_pp == nx_pme).
     * The factor of 2 allows for a maximum ratio of 2^2=4
     * between nx_pme and ny_pme.
     */
    if (std::gcd(ntot - npme, npme) * 2 < npme_root2)

    /* Does this division give a reasonable PME load? */
    return fits_pme_ratio(ntot, npme, ratio);
/*! \brief Make a guess for the number of PME ranks to use. */
static int guess_npme(const gmx::MDLogger& mdlog,
                      const gmx_mtop_t&    mtop,
                      const t_inputrec&    ir,

    float ratio = pme_load_estimate(mtop, ir, box);

    GMX_LOG(mdlog.info).appendTextFormatted("Guess for relative PME load: %.2f", ratio);

    /* We assume the optimal rank ratio is close to the load ratio.
     * The communication load is neglected,
     * but (hopefully) this will balance out between PP and PME.
     */

    if (!fits_pme_ratio(nrank_tot, nrank_tot / 2, ratio))

        /* We would need more than nrank_tot/2 PME-only ranks,
         * which is not possible. Since the PME load is very high,
         * we will not lose much performance when all ranks do PME.
         */
    /* First try to find npme as a factor of nrank_tot up to nrank_tot/3.
     * We start with a minimum PME node fraction of 1/16
     * and avoid ratios which lead to large prime factors in nnodes-npme.
     */
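    /* For example, with 64 total ranks the search below starts at
     * npme = (64 + 15)/16 = 4 and tries divisors of 64 up to 64/3 = 21. */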
    int npme = (nrank_tot + 15) / 16;
    while (npme <= nrank_tot / 3)
        if (nrank_tot % npme == 0)
            /* Note that fits_pp_pme_perf might change the PME grid;
             * in the current implementation it does not.
             */
            if (fits_pp_pme_perf(nrank_tot, npme, ratio))

    if (npme > nrank_tot / 3)
        /* Try any possible number for npme */

        while (npme <= nrank_tot / 2)
            /* Note that fits_pp_pme_perf may change the PME grid */
            if (fits_pp_pme_perf(nrank_tot, npme, ratio))

    if (npme > nrank_tot / 2)

                  "Could not find an appropriate number of separate PME ranks, i.e. >= %5f*#ranks "
                  "(%d), <= #ranks/2 (%d), and with reasonable performance (grid_x=%d, "

                  "Use the -npme option of mdrun, or change the number of ranks or the PME grid "
                  "dimensions; see the manual for details.",

                  gmx::roundToInt(0.95 * ratio * nrank_tot),

            .appendTextFormatted(
                    "Will use %d particle-particle and %d PME-only ranks\n"
                    "This is a guess; check the performance at the end of the log file",
/*! \brief Return \p n divided by \p f rounded up to the next integer. */
static int div_up(int n, int f)

    return (n + f - 1) / f;

real comm_box_frac(const gmx::IVec& dd_nc, real cutoff, const gmx_ddbox_t& ddbox)
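    /* In the loops below, nw[i] is the cut-off distance measured in cells
     * along dimension i. The terms accumulated into comm_vol are the slab
     * (nw[i]), quarter-cylinder edge (pi/4 * nw[i]*nw[j]) and sphere-octant
     * corner (pi/6 * nw[i]*nw[j]*nw[k]) contributions to the communicated
     * volume, expressed as a fraction of the home-cell volume. */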
    for (int i = 0; i < DIM; i++)
        real bt = ddbox.box_size[i] * ddbox.skew_fac[i];
        nw[i]   = dd_nc[i] * cutoff / bt;

    for (int i = 0; i < DIM; i++)
            for (int j = i + 1; j < DIM; j++)
                comm_vol += nw[i] * nw[j] * M_PI / 4;
                for (int k = j + 1; k < DIM; k++)
                    comm_vol += nw[i] * nw[j] * nw[k] * M_PI / 6;
/*! \brief Return whether the DD is inhomogeneous in the z direction */
static gmx_bool inhomogeneous_z(const t_inputrec& ir)

    return ((EEL_PME(ir.coulombtype) || ir.coulombtype == CoulombInteractionType::Ewald)
            && ir.pbcType == PbcType::Xyz && ir.ewald_geometry == EwaldGeometry::ThreeDC);

/*! \brief Estimate cost of PME FFT communication
 *
 * This only takes the communication into account and not imbalance
 * in the calculation. But the imbalances in communication and calculation
 * are similar and therefore these formulas also prefer load balance
 * in the FFT and pme_solve calculation.
 */
static float comm_pme_cost_vol(int npme, int a, int b, int c)
    /* We use a float here, since an integer might overflow */
    float comm_vol = npme - 1;

    comm_vol *= div_up(a, npme);
    comm_vol *= div_up(b, npme);

/*! \brief Estimate cost of communication for a possible domain decomposition. */
static float comm_cost_est(real limit,
                           const gmx_ddbox_t& ddbox,
                           const t_inputrec&  ir,

    gmx::IVec npme = { 1, 1, 1 };
    /* This is the cost of a pbc_dx call relative to the cost
     * of communicating the coordinate and force of an atom.
     * This will be machine dependent.
     * These factors are for x86 with SMP or Infiniband.
     */
    float pbcdx_rect_fac = 0.1;
    float pbcdx_tric_fac = 0.2;

    /* Check the DD algorithm restrictions */
    if ((ir.pbcType == PbcType::XY && ir.nwall < 2 && nc[ZZ] > 1)
        || (ir.pbcType == PbcType::Screw && (nc[XX] == 1 || nc[YY] > 1 || nc[ZZ] > 1)))

    if (inhomogeneous_z(ir) && nc[ZZ] > 1)

    assert(ddbox.npbcdim <= DIM);

    /* Check if the triclinic requirements are met */
    for (int i = 0; i < DIM; i++)
        for (int j = i + 1; j < ddbox.npbcdim; j++)
            if (box[j][i] != 0 || ir.deform[j][i] != 0
                || (ir.epc != PressureCoupling::No && ir.compress[j][i] != 0))
                if (nc[j] > 1 && nc[i] == 1)

    for (int i = 0; i < DIM; i++)
        bt[i] = ddbox.box_size[i] * ddbox.skew_fac[i];

        /* Without PBC and with 2 cells, there are no lower limits on the cell size */
        if (!(i >= ddbox.npbcdim && nc[i] <= 2) && bt[i] < nc[i] * limit)

        /* With PBC, check if the cut-off fits in nc[i]-1 cells */
        if (i < ddbox.npbcdim && nc[i] > 1 && (nc[i] - 1) * bt[i] < nc[i] * cutoff)
    /* The following choices should match those
     * in init_domain_decomposition in domdec.cpp.
     */
    if (nc[XX] == 1 && nc[YY] > 1)

    else if (nc[YY] == 1)

        /* Will we use 1D or 2D PME decomposition? */
        npme[XX] = (npme_tot % nc[XX] == 0) ? nc[XX] : npme_tot;
        npme[YY] = npme_tot / npme[XX];
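        /* That is: when npme_tot is a multiple of nc[XX], PME is decomposed in
         * 2D with npme[XX] == nc[XX], so the PP and PME grids line up along x;
         * otherwise all PME ranks are placed along x (1D decomposition). */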
    if (EEL_PME(ir.coulombtype) || EVDW_PME(ir.vdwtype))

        /* Check the PME grid restrictions.
         * Currently these can only be invalid here with too few grid lines
         * along the x dimension per rank doing PME.
         */
        int npme_x = (npme_tot > 1 ? npme[XX] : nc[XX]);

        /* Currently we don't have the OpenMP thread count available here.
         * But with threads the restrictions are only tighter, and it is
         * probably better anyhow to avoid settings where we need to reduce
         * grid lines over multiple ranks, as the thread check will do.
         */
        bool useThreads     = true;
        bool errorsAreFatal = false;
        if (!gmx_pme_check_restrictions(
                    ir.pme_order, ir.nkx, ir.nky, ir.nkz, npme_x, useThreads, errorsAreFatal))
    /* When two dimensions are (nearly) equal, use more cells
     * for the smallest index, so the decomposition does not
     * depend sensitively on the rounding of the box elements.
     */
    for (int i = 0; i < DIM; i++)
        for (int j = i + 1; j < DIM; j++)
            /* Check if the box sizes are nearly identical;
             * in that case we prefer nx > ny and ny > nz.
             */
            if (std::fabs(bt[j] - bt[i]) < 0.01 * bt[i] && nc[j] > nc[i])
                /* The XX/YY check is a bit compact. If nc[YY]==npme[YY]
                 * this means the swapped nc has nc[XX]==npme[XX],
                 * and we can also swap X and Y for PME.
                 */
                /* Check if dimensions i and j are equivalent for PME.
                 * For x/y: if nc[YY]!=npme[YY], we cannot swap x/y.
                 * For y/z: we cannot have PME decomposition in z.
                 */
                    || !((i == XX && j == YY && nc[YY] != npme[YY]) || (i == YY && j == ZZ && npme[YY] > 1)))
    /* This function determines only half of the communication cost.
     * All PP, PME and PP-PME communication is symmetric
     * and the "back"-communication cost is identical to the forward cost.
     */

    float comm_vol = comm_box_frac(nc, cutoff, ddbox);

    for (int i = 0; i < 2; i++)
        /* Determine the largest volume for PME x/f redistribution */
        if (nc[i] % npme[i] != 0)

                    (nc[i] > npme[i]) ? (npme[i] == 2 ? 1.0 / 3.0 : 0.5)
                                      : (1.0 - std::gcd(nc[i], npme[i]) / static_cast<double>(npme[i]));
            comm_pme += 3 * natoms * comm_vol_xf;

        /* Grid overlap communication */

        const int nk      = (i == 0 ? ir.nkx : ir.nky);
        const int overlap = (nk % npme[i] == 0 ? ir.pme_order - 1 : ir.pme_order);
        float     temp    = npme[i];

        /* Old line comm_pme += npme[i]*overlap*ir.nkx*ir.nky*ir.nkz/nk; */

    comm_pme += comm_pme_cost_vol(npme[YY], ir.nky, ir.nkz, ir.nkx);
    comm_pme += comm_pme_cost_vol(npme[XX], ir.nkx, ir.nky, ir.nkz);

    /* Add cost of pbc_dx for bondeds */
    float cost_pbcdx = 0;
    if ((nc[XX] == 1 || nc[YY] == 1) || (nc[ZZ] == 1 && ir.pbcType != PbcType::XY))
        if ((ddbox.tric_dir[XX] && nc[XX] == 1) || (ddbox.tric_dir[YY] && nc[YY] == 1))
            cost_pbcdx = pbcdxr * pbcdx_tric_fac;

            cost_pbcdx = pbcdxr * pbcdx_rect_fac;

                "nc %2d %2d %2d %2d %2d vol pp %6.4f pbcdx %6.4f pme %9.3e tot %9.3e\n",

                comm_pme / (3 * natoms),
                comm_vol + cost_pbcdx + comm_pme / (3 * natoms));

    return 3 * natoms * (comm_vol + cost_pbcdx) + comm_pme;
/*! \brief Assign penalty factors to possible domain decompositions,
 * based on the estimated communication costs. */
static void assign_factors(const real limit,
                           const gmx_ddbox_t& ddbox,
                           const t_inputrec&  ir,

    gmx::IVec& ir_try = *irTryPtr;

        const float ce = comm_cost_est(limit, cutoff, box, ddbox, natoms, ir, pbcdxr, npme, ir_try);

                || ce < comm_cost_est(limit, cutoff, box, ddbox, natoms, ir, pbcdxr, npme, *opt)))

    for (int x = mdiv[0]; x >= 0; x--)
        for (int i = 0; i < x; i++)
            ir_try[XX] *= div[0];

        for (int y = mdiv[0] - x; y >= 0; y--)
            for (int i = 0; i < y; i++)
                ir_try[YY] *= div[0];

            for (int i = 0; i < mdiv[0] - x - y; i++)
                ir_try[ZZ] *= div[0];

                    limit, cutoff, box, ddbox, natoms, ir, pbcdxr, npme, ndiv - 1, div + 1, mdiv + 1, irTryPtr, opt);

            for (int i = 0; i < mdiv[0] - x - y; i++)
                ir_try[ZZ] /= div[0];

            for (int i = 0; i < y; i++)
                ir_try[YY] /= div[0];

        for (int i = 0; i < x; i++)
            ir_try[XX] /= div[0];
/*! \brief Determine the optimal distribution of DD cells for the
 * simulation system and number of MPI ranks
 *
 * \returns The optimal grid cell choice, which contains all zeros
 *          if no valid cell choice exists. */
static gmx::IVec optimizeDDCells(const gmx::MDLogger& mdlog,
                                 const int            numRanksRequested,
                                 const int            numPmeOnlyRanks,
                                 const real           cellSizeLimit,
                                 const gmx_mtop_t&    mtop,
                                 const gmx_ddbox_t&   ddbox,
                                 const t_inputrec&    ir,
                                 const DDSystemInfo&  systemInfo)
    const int numPPRanks = numRanksRequested - numPmeOnlyRanks;

            .appendTextFormatted(
                    "Optimizing the DD grid for %d cells with a minimum initial size of %.3f nm",

    if (inhomogeneous_z(ir))

                .appendTextFormatted(
                        "Ewald_geometry=%s: assuming inhomogeneous particle distribution in z, "
                        "will not decompose in z.",
                        enumValueToString(ir.ewald_geometry));

    // For cost estimates, we need the number of ranks doing PME work,
    // which is the number of PP ranks when not using separate PME ranks.
    const int numRanksDoingPmeWork =
            (EEL_PME(ir.coulombtype) ? ((numPmeOnlyRanks > 0) ? numPmeOnlyRanks : numPPRanks) : 0);

    if (systemInfo.haveInterDomainBondeds)
        /* If we can skip PBC for distance calculations in plain-C bondeds,
         * we can save some time (e.g. 3D DD with pbc=xyz).
         * Here we ignore SIMD bondeds as they always do (fast) PBC.
         */
        count_bonded_distances(mtop, ir, &pbcdxr, nullptr);
        pbcdxr /= static_cast<double>(mtop.natoms);
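        /* pbcdxr now holds the average number of pbc_dx calls per atom
         * (also reported in the debug output below). */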
        /* Every molecule is a single charge group: no pbc required */

    if (cellSizeLimit > 0)
        std::string maximumCells = "The maximum allowed number of cells is:";
        for (int d = 0; d < DIM; d++)
            int nmax = static_cast<int>(ddbox.box_size[d] * ddbox.skew_fac[d] / cellSizeLimit);
            if (d >= ddbox.npbcdim && nmax < 2)

            if (d == ZZ && inhomogeneous_z(ir))

            maximumCells += gmx::formatString(" %c %d", 'X' + d, nmax);

        GMX_LOG(mdlog.info).appendText(maximumCells);

        fprintf(debug, "Average nr of pbc_dx calls per atom %.2f\n", pbcdxr);

    /* Decompose numPPRanks into factors */
    std::vector<int> div;
    std::vector<int> mdiv;
    factorize(numPPRanks, &div, &mdiv);

    gmx::IVec itry       = { 1, 1, 1 };
    gmx::IVec numDomains = { 0, 0, 0 };
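    /* assign_factors() below recursively tries all assignments of the prime
     * factors of numPPRanks to the x, y and z dimensions and stores the grid
     * with the lowest estimated communication cost in numDomains. */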
    assign_factors(cellSizeLimit,

                   numRanksDoingPmeWork,

real getDDGridSetupCellSizeLimit(const gmx::MDLogger& mdlog,
                                 const bool           bDynLoadBal,
                                 const real           dlb_scale,
                                 const t_inputrec&    ir,
                                 real                 systemInfoCellSizeLimit)

    real cellSizeLimit = systemInfoCellSizeLimit;

    /* Add a margin for DLB and/or pressure scaling */

        if (dlb_scale >= 1.0)
            gmx_fatal(FARGS, "The value for option -dds should be smaller than 1");

                .appendTextFormatted(
                        "Scaling the initial minimum size with 1/%g (option -dds) = %g", dlb_scale, 1 / dlb_scale);
        cellSizeLimit /= dlb_scale;

    else if (ir.epc != PressureCoupling::No)

                .appendTextFormatted(
                        "To account for pressure scaling, scaling the initial minimum size with %g",
                        DD_GRID_MARGIN_PRES_SCALE);
        cellSizeLimit *= DD_GRID_MARGIN_PRES_SCALE;

    return cellSizeLimit;
gmx::SeparatePmeRanksPermitted checkForSeparatePmeRanks(const gmx::MDModulesNotifiers& notifiers,
                                                        const DomdecOptions&           options,
                                                        int                            numRanksRequested,
                                                        bool                           useGpuForNonbonded,

    gmx::SeparatePmeRanksPermitted separatePmeRanksPermitted;

    /* Permit MDModules to notify whether they want to use PME-only ranks */
    notifiers.simulationSetupNotifier_.notify(&separatePmeRanksPermitted);

    /* With NB GPUs we don't automatically use PME-only CPU ranks. PME ranks can
     * improve performance with many threads per GPU, since our OpenMP
     * scaling is bad, but it's difficult to automate the setup.
     */
    if (useGpuForNonbonded && options.numPmeRanks < 0)
        separatePmeRanksPermitted.disablePmeRanks(
                "PME-only CPU ranks are not automatically used when "
                "non-bonded interactions are computed on GPUs");

    /* If GPU is used for PME then only 1 PME rank is permitted */
    if (useGpuForPme && (options.numPmeRanks < 0 || options.numPmeRanks > 1))
        separatePmeRanksPermitted.disablePmeRanks(
                "PME GPU decomposition is not supported, only one separate PME-only GPU rank "

    // With explicit DD grid setup we do not automatically use PME-only ranks
    if (options.numCells[XX] > 0 && options.numPmeRanks < 0)
        separatePmeRanksPermitted.disablePmeRanks("explicit DD grid requested");

    // Controls the automated choice of when to use separate PME-only ranks.
    const int minRankCountToDefaultToSeparatePmeRanks = 19;

    // Check if we have enough total ranks to use PME-only ranks
    if (numRanksRequested < minRankCountToDefaultToSeparatePmeRanks && options.numPmeRanks < 0)
        separatePmeRanksPermitted.disablePmeRanks(
                "there are too few total ranks for efficient splitting");

    return separatePmeRanksPermitted;
void checkForValidRankCountRequests(const int                             numRanksRequested,
                                    const int                             numPmeRanksRequested,
                                    const gmx::SeparatePmeRanksPermitted& separatePmeRanksPermitted,
                                    const bool                            checkForLargePrimeFactors)

    int numPPRanksRequested = numRanksRequested;
    if (usingPme && numPmeRanksRequested > 0)
        numPPRanksRequested -= numPmeRanksRequested;
        if (numPmeRanksRequested > numPPRanksRequested)
                      "Cannot have %d separate PME ranks with only %d PP ranks, choose fewer or no "
                      "separate PME ranks",
                      numPmeRanksRequested,
                      numPPRanksRequested);

    /* If the simulation does not use PME but -npme > 0 was requested, inform the user and throw an error */
    if (!usingPme && numPmeRanksRequested > 0)
                  "The system does not use PME for electrostatics or LJ, so the requested "
                  "-npme %d option is not viable.",
                  numPmeRanksRequested);

    /* If some part of the code cannot use PME-only ranks and the user
     * explicitly requested them with mdrun -npme > 0, throw an error */
    if (!separatePmeRanksPermitted.permitSeparatePmeRanks() && numPmeRanksRequested > 0)
                  "Cannot have %d separate PME ranks because: %s",
                  numPmeRanksRequested,
                  separatePmeRanksPermitted.reasonsWhyDisabled().c_str());
    // Once the rank count is large enough, it becomes worth
    // suggesting improvements to the user.
    const int minPPRankCountToCheckForLargePrimeFactors = 13;
    if (checkForLargePrimeFactors && numPPRanksRequested >= minPPRankCountToCheckForLargePrimeFactors)
        const int largestDivisor = largest_divisor(numPPRanksRequested);
        /* Check if the largest prime factor is more than numPPRanks ^ (2/3) */
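        /* The comparison below is that check with both sides cubed, which
         * avoids floating-point arithmetic: d^3 > n^2  <=>  d > n^(2/3). */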
        if (largestDivisor * largestDivisor * largestDivisor > numPPRanksRequested * numPPRanksRequested)
                      "The number of ranks selected for particle-particle work (%d) "
                      "contains a large prime factor %d. In most cases this will lead to "
                      "bad performance. Choose a number with smaller prime factors or "
                      "set the decomposition (option -dd) manually.",
/*! \brief Return the number of PME-only ranks used by the simulation
 *
 * If the user did not choose a number, then decide for them. */
static int getNumPmeOnlyRanksToUse(const gmx::MDLogger&                  mdlog,
                                   const gmx::DomdecOptions&             options,
                                   const gmx_mtop_t&                     mtop,
                                   const t_inputrec&                     ir,
                                   const gmx::SeparatePmeRanksPermitted& separatePmeRanksPermitted,
                                   const int                             numRanksRequested)

    int numPmeOnlyRanks = 0;

    if (!(EEL_PME(ir.coulombtype) || EVDW_PME(ir.vdwtype)))
        // System does not use PME for electrostatics or LJ

                .appendTextFormatted("The system does not use PME for electrostatics or LJ");

    std::string extraMessage;

    if (options.numPmeRanks >= 0)
        numPmeOnlyRanks = options.numPmeRanks;
        extraMessage += ", as requested with -npme option";

        /* Disable PME-only ranks if some part of the code requested so and the choice is left to GROMACS */
        if (!separatePmeRanksPermitted.permitSeparatePmeRanks())
            extraMessage += " because: " + separatePmeRanksPermitted.reasonsWhyDisabled();

            numPmeOnlyRanks = guess_npme(mdlog, mtop, ir, box, numRanksRequested);
            extraMessage += ", as guessed by mdrun";

    GMX_RELEASE_ASSERT(numPmeOnlyRanks <= numRanksRequested,
                       "Cannot have more PME ranks than total ranks");

            .appendTextFormatted(
                    "Using %d separate PME ranks%s", numPmeOnlyRanks, extraMessage.c_str());

    return numPmeOnlyRanks;
/*! \brief Sets the order of the DD dimensions, returns the number of DD dimensions */
static int set_dd_dim(const gmx::IVec& numDDCells, const DDSettings& ddSettings, ivec* dims)

    if (ddSettings.useDDOrderZYX)
        /* Decomposition order z,y,x */
        for (int dim = DIM - 1; dim >= 0; dim--)
            if (numDDCells[dim] > 1)
                (*dims)[ndim++] = dim;

        /* Decomposition order x,y,z */
        for (int dim = 0; dim < DIM; dim++)
            if (numDDCells[dim] > 1)
                (*dims)[ndim++] = dim;

    /* Set dim[0] to avoid extra checks on ndim in several places */

DDGridSetup getDDGridSetup(const gmx::MDLogger&                  mdlog,
                           MPI_Comm                              communicator,
                           const int                             numRanksRequested,
                           const DomdecOptions&                  options,
                           const DDSettings&                     ddSettings,
                           const DDSystemInfo&                   systemInfo,
                           const real                            cellSizeLimit,
                           const gmx_mtop_t&                     mtop,
                           const t_inputrec&                     ir,
                           const gmx::SeparatePmeRanksPermitted& separatePmeRanksPermitted,
                           gmx::ArrayRef<const gmx::RVec>        xGlobal,

    int numPmeOnlyRanks = getNumPmeOnlyRanksToUse(
            mdlog, options, mtop, ir, separatePmeRanksPermitted, box, numRanksRequested);

    gmx::IVec numDomains;
    if (options.numCells[XX] > 0)
        numDomains                      = gmx::IVec(options.numCells);
        const ivec numDomainsLegacyIvec = { numDomains[XX], numDomains[YY], numDomains[ZZ] };
        set_ddbox_cr(ddRole, communicator, &numDomainsLegacyIvec, ir, box, xGlobal, ddbox);

        set_ddbox_cr(ddRole, communicator, nullptr, ir, box, xGlobal, ddbox);

        if (ddRole == DDRole::Master)
            numDomains = optimizeDDCells(
                    mdlog, numRanksRequested, numPmeOnlyRanks, cellSizeLimit, mtop, box, *ddbox, ir, systemInfo);

    /* Communicate the information set by the master to all ranks */
    gmx_bcast(sizeof(numDomains), numDomains, communicator);
    if (EEL_PME(ir.coulombtype))
        gmx_bcast(sizeof(numPmeOnlyRanks), &numPmeOnlyRanks, communicator);

    DDGridSetup ddGridSetup;
    ddGridSetup.numPmeOnlyRanks = numPmeOnlyRanks;
    ddGridSetup.numDomains[XX]  = numDomains[XX];
    ddGridSetup.numDomains[YY]  = numDomains[YY];
    ddGridSetup.numDomains[ZZ]  = numDomains[ZZ];
    ddGridSetup.numDDDimensions = set_dd_dim(numDomains, ddSettings, &ddGridSetup.ddDimensions);