/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function definitions necessary for
 * managing automatic load balance of PME calculations (Coulomb and
 * LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
#include "pme-load-balancing.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_network.h"
#include "gromacs/legacyheaders/calcgrid.h"
#include "gromacs/legacyheaders/force.h"
#include "gromacs/legacyheaders/md_logging.h"
#include "gromacs/legacyheaders/network.h"
#include "gromacs/legacyheaders/sim_util.h"
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/smalloc.h"

#include "pme-internal.h"
/*! \brief Parameters and settings for one PP-PME setup */
struct pme_setup_t {
    real              rcut_coulomb;    /**< Coulomb cut-off */
    real              rlist;           /**< pair-list cut-off */
    real              rlistlong;       /**< long-range pair-list cut-off */
    int               nstcalclr;       /**< frequency of evaluating long-range forces for group scheme */
    real              spacing;         /**< (largest) PME grid spacing */
    ivec              grid;            /**< the PME grid dimensions */
    real              grid_efficiency; /**< inefficiency factor for non-uniform grids <= 1 */
    real              ewaldcoeff_q;    /**< Electrostatic Ewald coefficient */
    real              ewaldcoeff_lj;   /**< LJ Ewald coefficient, only for the call to send_switchgrid */
    struct gmx_pme_t *pmedata;         /**< the data structure used in the PME code */
    int               count;           /**< number of times this setup has been timed */
    double            cycles;          /**< the fastest time for this setup in cycles */
};
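/* Illustration (numbers invented for this comment): in a cubic 6.4 nm box,
 * a 64x64x64 grid gives spacing = 6.4/64 = 0.1 nm in every dimension and
 * grid_efficiency = 1; with uneven x/y/z spacing the factor moves away
 * from 1, which the initial scan uses to skip such grids.
 */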
/*! \brief After 50 nstlist periods of not observing imbalance: never tune PME */
const int  PMETunePeriod = 50;
/*! \brief Trigger PME load balancing at more than 5% PME overload */
const real loadBalanceTriggerFactor = 1.05;
/*! \brief In the initial scan, step by grids that are at least a factor 0.8 coarser */
const real gridScaleFactor = 0.8;
/*! \brief In the initial scan, try to skip grids with uneven x/y/z spacing,
 * checking if the "efficiency" is more than 5% worse than the previous grid.
 */
const real relativeEfficiencyFactor = 1.05;
/*! \brief In later stages, only rerun setups that are at most 12% slower than the fastest run so far */
const real maxRelativeSlowdownAccepted = 1.12;
/*! \brief If setups get more than 2% faster, do another round to avoid
 * choosing a slower setup due to acceleration or fluctuations.
 */
const real maxFluctuationAccepted = 1.02;
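/* Worked example of how these constants interact (numbers invented):
 * balancing starts once the measured PME/PP force time ratio reaches
 * loadBalanceTriggerFactor, e.g. 1.06 triggers, 1.03 does not. During the
 * scan, a setup timed at 5.7 M-cycles is excluded from later stages when
 * the fastest setup so far took 5.0 M-cycles, since 5.7 > 5.0*1.12 = 5.6.
 * If a previously timed setup later runs more than 2% faster (say 4.8
 * M-cycles after 5.0), an extra stage is added and all setups are rescanned.
 */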
/*! \brief Enumeration whose values describe the effect limiting the load balancing */
enum epmelb {
    epmelblimNO, epmelblimBOX, epmelblimDD, epmelblimPMEGRID, epmelblimNR
};

/*! \brief Descriptive strings matching ::epmelb */
const char *pmelblim_str[epmelblimNR] =
{ "no", "box size", "domain decomposition", "PME grid restriction" };
struct pme_load_balancing_t {
    gmx_bool     bSepPMERanks;       /**< do we have separate PME ranks? */
    gmx_bool     bActive;            /**< is PME tuning active? */
    gmx_bool     bBalance;           /**< are we in the balancing phase, i.e. trying different setups? */
    int          nstage;             /**< the current maximum number of stages */

    real         cut_spacing;        /**< the minimum cutoff / PME grid spacing ratio */
    real         rcut_vdw;           /**< Vdw cutoff (does not change) */
    real         rcut_coulomb_start; /**< Initial electrostatics cutoff */
    int          nstcalclr_start;    /**< Initial long-range force evaluation frequency (group scheme) */
    real         rbuf_coulomb;       /**< the Coulomb pairlist buffer size */
    real         rbuf_vdw;           /**< the VdW pairlist buffer size */
    matrix       box_start;          /**< the initial simulation box */
    int          n;                  /**< the count of setups as well as the allocation size */
    pme_setup_t *setup;              /**< the PME+cutoff setups */
    int          cur;                /**< the current setup */
    int          fastest;            /**< fastest setup up to now */
    int          start;              /**< start of setup range to consider in stage>0 */
    int          end;                /**< end of setup range to consider in stage>0 */
    int          elimited;           /**< was the balancing limited, uses enum above */
    int          cutoff_scheme;      /**< Verlet or group cut-offs */

    int          stage;              /**< the current stage */

    int          cycles_n;           /**< step cycle counter cumulative count */
    double       cycles_c;           /**< step cycle counter cumulative cycles */
};
void pme_loadbal_init(pme_load_balancing_t **pme_lb_p,
                      const t_inputrec *ir, matrix box,
                      const interaction_const_t *ic,
                      struct gmx_pme_t *pmedata,
                      gmx_bool bUseGPU, gmx_bool bSepPMERanks,
                      gmx_bool *bPrinting)
{
    pme_load_balancing_t *pme_lb;
    real                  spm, sp;
    int                   d;

    snew(pme_lb, 1);

    pme_lb->bSepPMERanks = bSepPMERanks;

    /* Any number of stages >= 2 is supported */
    pme_lb->nstage = 2;

    pme_lb->cutoff_scheme = ir->cutoff_scheme;
    if (pme_lb->cutoff_scheme == ecutsVERLET)
    {
        pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        pme_lb->rbuf_vdw     = pme_lb->rbuf_coulomb;
    }
    else
    {
        if (ic->rcoulomb > ic->rlist)
        {
            pme_lb->rbuf_coulomb = ic->rlistlong - ic->rcoulomb;
        }
        else
        {
            pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        }
        if (ic->rvdw > ic->rlist)
        {
            pme_lb->rbuf_vdw = ic->rlistlong - ic->rvdw;
        }
        else
        {
            pme_lb->rbuf_vdw = ic->rlist - ic->rvdw;
        }
    }
    copy_mat(box, pme_lb->box_start);
    if (ir->ePBC == epbcXY && ir->nwall == 2)
    {
        svmul(ir->wall_ewald_zfac, pme_lb->box_start[ZZ], pme_lb->box_start[ZZ]);
    }

    pme_lb->n = 1;
    snew(pme_lb->setup, pme_lb->n);
    pme_lb->rcut_vdw               = ic->rvdw;
    pme_lb->rcut_coulomb_start     = ir->rcoulomb;
    pme_lb->nstcalclr_start        = ir->nstcalclr;

    pme_lb->setup[0].rcut_coulomb  = ic->rcoulomb;
    pme_lb->setup[0].rlist         = ic->rlist;
    pme_lb->setup[0].rlistlong     = ic->rlistlong;
    pme_lb->setup[0].nstcalclr     = ir->nstcalclr;
    pme_lb->setup[0].grid[XX]      = ir->nkx;
    pme_lb->setup[0].grid[YY]      = ir->nky;
    pme_lb->setup[0].grid[ZZ]      = ir->nkz;
    pme_lb->setup[0].ewaldcoeff_q  = ic->ewaldcoeff_q;
    pme_lb->setup[0].ewaldcoeff_lj = ic->ewaldcoeff_lj;

    pme_lb->setup[0].pmedata       = pmedata;
    spm = 0;
    for (d = 0; d < DIM; d++)
    {
        sp = norm(pme_lb->box_start[d])/pme_lb->setup[0].grid[d];
        if (sp > spm)
        {
            spm = sp;
        }
    }
    pme_lb->setup[0].spacing = spm;

    if (ir->fourier_spacing > 0)
    {
        pme_lb->cut_spacing = ir->rcoulomb/ir->fourier_spacing;
    }
    else
    {
        pme_lb->cut_spacing = ir->rcoulomb/pme_lb->setup[0].spacing;
    }
    pme_lb->elimited = epmelblimNO;

    pme_lb->cycles_n = 0;
    pme_lb->cycles_c = 0;
    /* Tune with GPUs and/or separate PME ranks.
     * When running only on a CPU without PME ranks, PME tuning will only help
     * with small numbers of atoms in the cut-off sphere.
     */
    pme_lb->bActive = (wallcycle_have_counter() && (bUseGPU || bSepPMERanks));

    /* With GPUs and no separate PME ranks we can't measure the PP/PME
     * imbalance, so we start balancing right away.
     * Otherwise we only start balancing after we observe imbalance.
     */
    pme_lb->bBalance = (pme_lb->bActive && (bUseGPU && !bSepPMERanks));

    *pme_lb_p = pme_lb;

    *bPrinting = pme_lb->bBalance;
}
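/* Sketch of the intended call sequence (the real call sites live in the
 * mdrun integrator loop, not in this file; argument lists abbreviated):
 *
 *   pme_load_balancing_t *pme_lb;
 *   gmx_bool              bPrinting;
 *
 *   pme_loadbal_init(&pme_lb, ir, state->box, fr->ic, fr->pmedata,
 *                    bUseGPU, bSepPMERanks, &bPrinting);
 *   // every nstlist steps:
 *   pme_loadbal_do(pme_lb, cr, ..., wcycle, step, step_rel, &bPrinting);
 *   // at the end of the run:
 *   pme_loadbal_done(pme_lb, cr, fplog, bNonBondedOnGPU);
 */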
/*! \brief Try to increase the cutoff during load balancing */
static gmx_bool pme_loadbal_increase_cutoff(pme_load_balancing_t *pme_lb,
                                            int                   pme_order,
                                            const gmx_domdec_t   *dd)
{
    pme_setup_t *set;
    int          npmeranks_x, npmeranks_y;
    real         fac, sp;
    real         tmpr_coulomb, tmpr_vdw;
    int          d;
    gmx_bool     grid_ok;

    /* Try to add a new setup with next larger cut-off to the list */
    pme_lb->n++;
    srenew(pme_lb->setup, pme_lb->n);
    set          = &pme_lb->setup[pme_lb->n-1];
    set->pmedata = NULL;

    get_pme_nnodes(dd, &npmeranks_x, &npmeranks_y);
    fac = 1;
    do
    {
        /* Avoid an infinite while loop, which can occur at the minimum grid size.
         * Note that in practice load balancing will stop before this point.
         * The factor 2.1 allows for the extreme case in which only grids
         * of powers of 2 are allowed (the current code supports more grids).
         */
        if (fac > 2.1)
        {
            pme_lb->n--;

            return FALSE;
        }

        fac *= 1.05;
        clear_ivec(set->grid);
        sp = calc_grid(NULL, pme_lb->box_start,
                       fac*pme_lb->setup[pme_lb->cur].spacing,
                       &set->grid[XX], &set->grid[YY], &set->grid[ZZ]);

        /* As here we can't easily check if one of the PME ranks
         * uses threading, we do a conservative grid check.
         * This means we can't use pme_order or less grid lines
         * per PME rank along x, which is not a strong restriction.
         */
        gmx_pme_check_restrictions(pme_order,
                                   set->grid[XX], set->grid[YY], set->grid[ZZ],
                                   npmeranks_x, npmeranks_y,
                                   TRUE, FALSE, &grid_ok);
    }
    while (sp <= 1.001*pme_lb->setup[pme_lb->cur].spacing || !grid_ok);
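    /* Example of the resulting cut-off scaling (numbers invented): with
     * cut_spacing = rcoulomb/spacing = 1.0/0.120 ~= 8.33, accepting a new
     * spacing sp = 0.135 nm below gives rcut_coulomb = 8.33*0.135 ~= 1.125 nm,
     * i.e. the cut-off grows by the same factor as the grid spacing.
     */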
    set->rcut_coulomb = pme_lb->cut_spacing*sp;
    if (set->rcut_coulomb < pme_lb->rcut_coulomb_start)
    {
        /* This is unlikely, but can happen when e.g. continuing from
         * a checkpoint after equilibration where the box shrank a lot.
         * We want to avoid rcoulomb getting smaller than rvdw
         * and there might be more issues with decreasing rcoulomb.
         */
        set->rcut_coulomb = pme_lb->rcut_coulomb_start;
    }
    if (pme_lb->cutoff_scheme == ecutsVERLET)
    {
        set->rlist = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        /* We don't use LR lists with Verlet, but this avoids if-statements in further checks */
        set->rlistlong = set->rlist;
    }
    else
    {
        tmpr_coulomb   = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        tmpr_vdw       = pme_lb->rcut_vdw + pme_lb->rbuf_vdw;
        set->rlist     = std::min(tmpr_coulomb, tmpr_vdw);
        set->rlistlong = std::max(tmpr_coulomb, tmpr_vdw);

        /* Set the long-range update frequency */
        if (set->rlist == set->rlistlong)
        {
            /* No long-range interactions if the short-/long-range cutoffs are identical */
            set->nstcalclr = 0;
        }
        else if (pme_lb->nstcalclr_start == 0 || pme_lb->nstcalclr_start == 1)
        {
            /* We were not doing long-range before, but now we are since rlist != rlistlong */
            set->nstcalclr = 1;
        }
        else
        {
            /* We were already doing long-range interactions from the start */
            if (pme_lb->rcut_vdw > pme_lb->rcut_coulomb_start)
            {
                /* We were originally doing long-range VdW-only interactions.
                 * If rvdw is still longer than rcoulomb we keep the original nstcalclr,
                 * but if the coulomb cutoff has become longer we should update the long-range
                 * forces every step.
                 */
                set->nstcalclr = (tmpr_vdw > tmpr_coulomb) ? pme_lb->nstcalclr_start : 1;
            }
            else
            {
                /* We were not doing any long-range interaction from the start,
                 * since it is not possible to do twin-range coulomb for the PME interaction.
                 */
                set->nstcalclr = 1;
            }
        }
    }
    /* The grid efficiency is the size wrt a grid with uniform x/y/z spacing */
    set->grid_efficiency = 1;
    for (d = 0; d < DIM; d++)
    {
        set->grid_efficiency *= (set->grid[d]*sp)/norm(pme_lb->box_start[d]);
    }
    /* The Ewald coefficient is inversely proportional to the cut-off */
    set->ewaldcoeff_q =
        pme_lb->setup[0].ewaldcoeff_q*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;
    /* We set ewaldcoeff_lj in set, even when LJ-PME is not used */
    set->ewaldcoeff_lj =
        pme_lb->setup[0].ewaldcoeff_lj*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;
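    /* Worked example (numbers invented): with setup[0] at rcut_coulomb = 1.0 nm
     * and ewaldcoeff_q = 3.12 nm^-1, a new cut-off of 1.2 nm gives
     * ewaldcoeff_q = 3.12*1.0/1.2 = 2.6 nm^-1, keeping the product
     * ewaldcoeff_q*rcut_coulomb, and thus erfc(beta*rc) at the cut-off, constant.
     */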
    if (debug)
    {
        fprintf(debug, "PME loadbal: grid %d %d %d, coulomb cutoff %f\n",
                set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb);
    }

    return TRUE;
}
/*! \brief Print the PME grid */
static void print_grid(FILE *fp_err, FILE *fp_log,
                       const char *pre,
                       const char *desc,
                       const pme_setup_t *set,
                       double cycles)
{
    char buf[STRLEN], buft[STRLEN];

    if (cycles >= 0)
    {
        sprintf(buft, ": %.1f M-cycles", cycles*1e-6);
    }
    else
    {
        buft[0] = '\0';
    }
    sprintf(buf, "%-11s%10s pme grid %d %d %d, coulomb cutoff %.3f%s",
            pre,
            desc, set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb,
            buft);
    if (fp_err != NULL)
    {
        fprintf(fp_err, "\r%s\n", buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log, "%s\n", buf);
    }
}
/*! \brief Return the index of the last setup used in PME load balancing */
static int pme_loadbal_end(pme_load_balancing_t *pme_lb)
{
    /* In the initial stage only n is set; end is not set yet */
    if (pme_lb->end > 0)
    {
        return pme_lb->end;
    }
    else
    {
        return pme_lb->n;
    }
}
/*! \brief Print descriptive string about what limits PME load balancing */
static void print_loadbal_limited(FILE *fp_err, FILE *fp_log,
                                  gmx_int64_t step,
                                  pme_load_balancing_t *pme_lb)
{
    char buf[STRLEN], sbuf[22];

    sprintf(buf, "step %4s: the %s limits the PME load balancing to a coulomb cut-off of %.3f",
            gmx_step_str(step, sbuf),
            pmelblim_str[pme_lb->elimited],
            pme_lb->setup[pme_loadbal_end(pme_lb)-1].rcut_coulomb);
    if (fp_err != NULL)
    {
        fprintf(fp_err, "\r%s\n", buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log, "%s\n", buf);
    }
}
/*! \brief Switch load balancing to stage 1
 *
 * In this stage, only reasonably fast setups are run again. */
static void switch_to_stage1(pme_load_balancing_t *pme_lb)
{
    pme_lb->start = 0;
    while (pme_lb->start+1 < pme_lb->n &&
           (pme_lb->setup[pme_lb->start].count == 0 ||
            pme_lb->setup[pme_lb->start].cycles >
            pme_lb->setup[pme_lb->fastest].cycles*maxRelativeSlowdownAccepted))
    {
        pme_lb->start++;
    }
    while (pme_lb->start > 0 && pme_lb->setup[pme_lb->start-1].cycles == 0)
    {
        pme_lb->start--;
    }

    pme_lb->end = pme_lb->n;
    if (pme_lb->setup[pme_lb->end-1].count > 0 &&
        pme_lb->setup[pme_lb->end-1].cycles >
        pme_lb->setup[pme_lb->fastest].cycles*maxRelativeSlowdownAccepted)
    {
        pme_lb->end--;
    }

    pme_lb->stage = 1;

    /* Next we want to choose setup pme_lb->start, but as we will increase
     * pme_lb->cur by one right after returning, we subtract 1 here.
     */
    pme_lb->cur = pme_lb->start - 1;
}
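/* Example (numbers invented): if stage 0 scanned setups 0..7, the fastest
 * is setup 4, and only setups 3..6 are within maxRelativeSlowdownAccepted
 * of it, then start = 3 and end = 7, so stage 1 reruns setups 3..6,
 * beginning at cur = start - 1 + 1 = 3 after the caller's increment.
 */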
/*! \brief Try to adjust the PME grid and Coulomb cut-off
 *
 * The adjustment is done to generate a different non-bonded PP and PME load.
 * With separate PME ranks (PP and PME on different processes) or with
 * a GPU (PP on GPU, PME on CPU), PP and PME run on different resources
 * and changing the load will affect the load balance and performance.
 * The total time for a set of integration steps is monitored and a range
 * of grid/cut-off setups is scanned. After calling pme_load_balance many
 * times and acquiring enough statistics, the best performing setup is chosen.
 * Here we try to take into account fluctuations and changes due to external
 * factors as well as DD load balancing.
 * Returns TRUE if the load balancing continues, FALSE if the balancing is done.
 */
static gmx_bool
pme_load_balance(pme_load_balancing_t *pme_lb,
                 t_commrec *cr,
                 FILE *fp_err,
                 FILE *fp_log,
                 t_inputrec *ir,
                 t_state *state,
                 double cycles,
                 interaction_const_t *ic,
                 struct nonbonded_verlet_t *nbv,
                 struct gmx_pme_t **pmedata,
                 gmx_int64_t step)
{
    gmx_bool     OK;
    pme_setup_t *set;
    double       cycles_fast;
    char         buf[STRLEN], sbuf[22];
    real         rtab;
    if (pme_lb->stage == pme_lb->nstage)
    {
        return FALSE;
    }

    if (PAR(cr))
    {
        gmx_sumd(1, &cycles, cr);
        cycles /= cr->nnodes;
    }

    set = &pme_lb->setup[pme_lb->cur];
    set->count++;

    rtab = ir->rlistlong + ir->tabext;

    if (set->count % 2 == 1)
    {
        /* Skip the first cycle, because the first step after a switch
         * is much slower due to allocation and/or caching effects.
         */
        return TRUE;
    }

    sprintf(buf, "step %4s: ", gmx_step_str(step, sbuf));
    print_grid(fp_err, fp_log, buf, "timed with", set, cycles);

    if (set->count <= 2)
    {
        set->cycles = cycles;
    }
    else
    {
        if (cycles*maxFluctuationAccepted < set->cycles &&
            pme_lb->stage == pme_lb->nstage - 1)
        {
            /* The performance went up a lot (due to e.g. DD load balancing).
             * Add a stage, keep the minima, but rescan all setups.
             */
            pme_lb->nstage++;

            if (debug)
            {
                fprintf(debug, "The performance for grid %d %d %d went from %.3f to %.1f M-cycles, this is more than %f\n"
                        "Increased the number of stages to %d"
                        " and ignoring the previous performance\n",
                        set->grid[XX], set->grid[YY], set->grid[ZZ],
                        cycles*1e-6, set->cycles*1e-6, maxFluctuationAccepted,
                        pme_lb->nstage);
            }
        }
        set->cycles = std::min(set->cycles, cycles);
    }
    if (set->cycles < pme_lb->setup[pme_lb->fastest].cycles)
    {
        pme_lb->fastest = pme_lb->cur;

        if (DOMAINDECOMP(cr))
        {
            /* We found a new fastest setting, ensure that with subsequent
             * shorter cut-off's the dynamic load balancing does not make
             * the use of the current cut-off impossible. This solution is
             * a trade-off, as the PME load balancing and DD domain size
             * load balancing can interact in complex ways.
             * With the Verlet kernels, DD load imbalance will usually be
             * mainly due to bonded interaction imbalance, which will often
             * quickly push the domain boundaries beyond the limit for the
             * optimal, PME load balanced, cut-off. But it could be that
             * better overall performance can be obtained with a slightly
             * shorter cut-off and better DD load balancing.
             */
            change_dd_dlb_cutoff_limit(cr);
        }
    }
    cycles_fast = pme_lb->setup[pme_lb->fastest].cycles;
    /* Check in stage 0 if we should stop scanning grids.
     * Stop when the time is more than maxRelativeSlowdownAccepted longer than the fastest.
     */
    if (pme_lb->stage == 0 && pme_lb->cur > 0 &&
        cycles > pme_lb->setup[pme_lb->fastest].cycles*maxRelativeSlowdownAccepted)
    {
        pme_lb->n = pme_lb->cur + 1;
        /* Done with scanning, go to stage 1 */
        switch_to_stage1(pme_lb);
    }
    if (pme_lb->stage == 0)
    {
        int gridsize_start;

        gridsize_start = set->grid[XX]*set->grid[YY]*set->grid[ZZ];

        do
        {
            if (pme_lb->cur+1 < pme_lb->n)
            {
                /* We had already generated the next setup */
                OK = TRUE;
            }
            else
            {
                /* Find the next setup */
                OK = pme_loadbal_increase_cutoff(pme_lb, ir->pme_order, cr->dd);

                if (!OK)
                {
                    pme_lb->elimited = epmelblimPMEGRID;
                }
            }

            if (OK && ir->ePBC != epbcNONE)
            {
                OK = (sqr(pme_lb->setup[pme_lb->cur+1].rlistlong)
                      <= max_cutoff2(ir->ePBC, state->box));
                if (!OK)
                {
                    pme_lb->elimited = epmelblimBOX;
                }
            }

            if (OK)
            {
                pme_lb->cur++;

                if (DOMAINDECOMP(cr))
                {
                    OK = change_dd_cutoff(cr, state, ir,
                                          pme_lb->setup[pme_lb->cur].rlistlong);
                    if (!OK)
                    {
                        /* Failed: do not use this setup */
                        pme_lb->cur--;
                        pme_lb->elimited = epmelblimDD;
                    }
                }
            }
            if (!OK)
            {
                /* We hit the upper limit for the cut-off,
                 * the setup should not go further than cur.
                 */
                pme_lb->n = pme_lb->cur + 1;
                print_loadbal_limited(fp_err, fp_log, step, pme_lb);
                /* Switch to the next stage */
                switch_to_stage1(pme_lb);
            }
        }
        while (OK &&
               !(pme_lb->setup[pme_lb->cur].grid[XX]*
                 pme_lb->setup[pme_lb->cur].grid[YY]*
                 pme_lb->setup[pme_lb->cur].grid[ZZ] <
                 gridsize_start*gridScaleFactor
                 &&
                 pme_lb->setup[pme_lb->cur].grid_efficiency <
                 pme_lb->setup[pme_lb->cur-1].grid_efficiency*relativeEfficiencyFactor));
    }
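    /* Example of the skip logic above (numbers invented): with a 64x64x64
     * starting grid, gridsize_start = 262144, so the scan only stops at a
     * setup whose grid has fewer than 0.8*262144 ~= 209715 points and whose
     * grid_efficiency is within 5% of the previous setup; a 60x60x60 grid
     * (216000 points) is passed over, while 56x56x56 (175616) can be timed.
     */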
    if (pme_lb->stage > 0 && pme_lb->end == 1)
    {
        pme_lb->cur   = 0;
        pme_lb->stage = pme_lb->nstage;
    }
    else if (pme_lb->stage > 0 && pme_lb->end > 1)
    {
        /* If stage = nstage-1:
         *   scan over all setups, rerunning only those setups
         *   which are not much slower than the fastest
         * else:
         *   use the next setup
         */
        do
        {
            pme_lb->cur++;
            if (pme_lb->cur == pme_lb->end)
            {
                pme_lb->stage++;
                pme_lb->cur = pme_lb->start;
            }
        }
        while (pme_lb->stage == pme_lb->nstage - 1 &&
               pme_lb->setup[pme_lb->cur].count > 0 &&
               pme_lb->setup[pme_lb->cur].cycles > cycles_fast*maxRelativeSlowdownAccepted);
    }
    if (pme_lb->stage == pme_lb->nstage)
    {
        /* We are done optimizing, use the fastest setup we found */
        pme_lb->cur = pme_lb->fastest;
    }

    if (DOMAINDECOMP(cr) && pme_lb->stage > 0)
    {
        OK = change_dd_cutoff(cr, state, ir, pme_lb->setup[pme_lb->cur].rlistlong);
        if (!OK)
        {
            /* Failsafe solution */
            if (pme_lb->cur > 1 && pme_lb->stage == pme_lb->nstage)
            {
                pme_lb->stage--;
            }
            pme_lb->fastest  = 0;
            pme_lb->start    = 0;
            pme_lb->end      = pme_lb->cur;
            pme_lb->cur      = pme_lb->start;
            pme_lb->elimited = epmelblimDD;
            print_loadbal_limited(fp_err, fp_log, step, pme_lb);
        }
    }
    /* Change the Coulomb cut-off and the PME grid */

    set = &pme_lb->setup[pme_lb->cur];

    ic->rcoulomb     = set->rcut_coulomb;
    ic->rlist        = set->rlist;
    ic->rlistlong    = set->rlistlong;
    ir->nstcalclr    = set->nstcalclr;
    ic->ewaldcoeff_q = set->ewaldcoeff_q;
    /* TODO: centralize the code that sets the potentials shifts */
    if (ic->coulomb_modifier == eintmodPOTSHIFT)
    {
        ic->sh_ewald = gmx_erfc(ic->ewaldcoeff_q*ic->rcoulomb);
    }
    if (EVDW_PME(ic->vdwtype))
    {
        /* We have PME for both Coulomb and VdW, set rvdw equal to rcoulomb */
        ic->rvdw          = set->rcut_coulomb;
        ic->ewaldcoeff_lj = set->ewaldcoeff_lj;
        if (ic->vdw_modifier == eintmodPOTSHIFT)
        {
            real crc2;

            ic->dispersion_shift.cpot = -std::pow(static_cast<double>(ic->rvdw), -6.0);
            ic->repulsion_shift.cpot  = -std::pow(static_cast<double>(ic->rvdw), -12.0);
            ic->sh_invrc6             = -ic->dispersion_shift.cpot;
            crc2                      = sqr(ic->ewaldcoeff_lj*ic->rvdw);
            ic->sh_lj_ewald           = (exp(-crc2)*(1 + crc2 + 0.5*crc2*crc2) - 1)*std::pow(static_cast<double>(ic->rvdw), -6.0);
        }
    }
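    /* The shifts above implement (a restatement, not extra code):
     *   dispersion: V_shift = -rc^-6,   repulsion: V_shift = -rc^-12,
     *   LJ-PME:     V_shift = (exp(-x^2)*(1 + x^2 + x^4/2) - 1)*rc^-6,
     * with x = ewaldcoeff_lj*rc and rc = rvdw, so each potential is zero
     * at the new cut-off.
     */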
    /* We always re-initialize the tables whether they are used or not */
    init_interaction_const_tables(NULL, ic, rtab);

    nbnxn_gpu_pme_loadbal_update_param(nbv, ic);

    /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
     * also sharing texture references. To keep the code simple, we don't
     * treat texture references as shared resources, but this means that
     * the coulomb_tab texture ref will get updated by multiple threads.
     * Hence, to ensure that the non-bonded kernels don't start before all
     * texture binding operations are finished, we need to wait for all ranks
     * to arrive here before continuing.
     *
     * Note that we could omit this barrier if GPUs are not shared (or
     * texture objects are used), but as this is initialization code, there
     * is no point in complicating things.
     */
#ifdef GMX_THREAD_MPI
    if (PAR(cr) && use_GPU(nbv))
    {
        gmx_barrier(cr);
    }
#endif  /* GMX_THREAD_MPI */
    if (!pme_lb->bSepPMERanks)
    {
        if (pme_lb->setup[pme_lb->cur].pmedata == NULL)
        {
            /* Generate a new PME data structure,
             * copying part of the old pointers.
             */
            gmx_pme_reinit(&set->pmedata,
                           cr, pme_lb->setup[0].pmedata, ir,
                           set->grid);
        }
        *pmedata = set->pmedata;
    }
    else
    {
        /* Tell our PME-only rank to switch grid */
        gmx_pme_send_switchgrid(cr, set->grid, set->ewaldcoeff_q, set->ewaldcoeff_lj);
    }

    if (debug)
    {
        print_grid(NULL, debug, "", "switched to", set, -1);
    }

    if (pme_lb->stage == pme_lb->nstage)
    {
        print_grid(fp_err, fp_log, "", "optimal", set, -1);
    }

    return TRUE;
}
void pme_loadbal_do(pme_load_balancing_t *pme_lb,
                    t_commrec            *cr,
                    FILE                 *fp_err,
                    FILE                 *fp_log,
                    t_inputrec           *ir,
                    t_forcerec           *fr,
                    t_state              *state,
                    gmx_wallcycle_t       wcycle,
                    gmx_int64_t           step,
                    gmx_int64_t           step_rel,
                    gmx_bool             *bPrinting)
{
    int    n_prev;
    double cycles_prev;

    assert(pme_lb != NULL);

    if (!pme_lb->bActive)
    {
        return;
    }

    n_prev      = pme_lb->cycles_n;
    cycles_prev = pme_lb->cycles_c;
    wallcycle_get(wcycle, ewcSTEP, &pme_lb->cycles_n, &pme_lb->cycles_c);
    if (pme_lb->cycles_n == 0)
    {
        /* Before the first step we haven't done any steps yet */
        return;
    }
    /* Sanity check, we expect nstlist cycle counts */
    if (pme_lb->cycles_n - n_prev != ir->nstlist)
    {
        /* We could return here, but it's safer to issue an error and quit */
        gmx_incons("pme_loadbal_do called at an interval != nstlist");
    }
    /* PME grid + cut-off optimization with GPUs or PME ranks */
    if (!pme_lb->bBalance && pme_lb->bSepPMERanks)
    {
        if (DDMASTER(cr->dd))
        {
            /* PME rank load is too high, start tuning */
            pme_lb->bBalance = (dd_pme_f_ratio(cr->dd) >= loadBalanceTriggerFactor);
        }
        dd_bcast(cr->dd, sizeof(gmx_bool), &pme_lb->bBalance);

        if (pme_lb->bBalance &&
            use_GPU(fr->nbv) && DOMAINDECOMP(cr) &&
            pme_lb->bSepPMERanks)
        {
            /* Lock DLB=auto to off (does nothing when DLB=yes/no).
             * With GPUs + separate PME ranks, we don't want DLB:
             * it could get activated while we scan coarse grids and
             * would then never be turned off again, which would hurt
             * performance at the final, optimal grid spacing, where
             * DLB almost never helps. Also, DLB can limit the cut-off
             * for PME tuning.
             */
            dd_dlb_set_lock(cr->dd, TRUE);
        }
    }
    if (pme_lb->bBalance)
    {
        /* init_step might not be a multiple of nstlist,
         * but the first cycle is always skipped anyhow.
         */
        pme_lb->bBalance =
            pme_load_balance(pme_lb, cr,
                             fp_err, fp_log,
                             ir, state, pme_lb->cycles_c - cycles_prev,
                             fr->ic, fr->nbv, &fr->pmedata,
                             step);

        /* Update constants in forcerec/inputrec to keep them in sync with fr->ic */
        fr->ewaldcoeff_q  = fr->ic->ewaldcoeff_q;
        fr->ewaldcoeff_lj = fr->ic->ewaldcoeff_lj;
        fr->rlist         = fr->ic->rlist;
        fr->rlistlong     = fr->ic->rlistlong;
        fr->rcoulomb      = fr->ic->rcoulomb;
        fr->rvdw          = fr->ic->rvdw;

        if (ir->eDispCorr != edispcNO)
        {
            calc_enervirdiff(NULL, ir->eDispCorr, fr);
        }
    }
    if (!pme_lb->bBalance &&
        DOMAINDECOMP(cr) &&
        dd_dlb_is_locked(cr->dd))
    {
        /* Unlock the DLB=auto, DLB is allowed to activate
         * (but we don't expect it to activate in most cases).
         */
        dd_dlb_set_lock(cr->dd, FALSE);
    }

    if (!pme_lb->bBalance &&
        (!pme_lb->bSepPMERanks || (step_rel <= PMETunePeriod*ir->nstlist)))
    {
        /* We have just deactivated the balancing and we're not measuring
         * PP/PME imbalance during the first PMETunePeriod*nstlist steps:
         * deactivate the tuning permanently.
         */
        pme_lb->bActive = FALSE;
    }

    *bPrinting = pme_lb->bBalance;
}
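/* Timing example (numbers invented): with nstlist = 10, this routine runs
 * every 10 steps and PMETunePeriod*nstlist = 500, so once balancing is
 * inactive, tuning stays enabled past step 500 only when separate PME
 * ranks are in use and can still report imbalance.
 */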
void restart_pme_loadbal(pme_load_balancing_t *pme_lb, int n)
{
    pme_lb->nstage += n;
}
/*! \brief Return product of the number of PME grid points in each dimension */
static int pme_grid_points(const pme_setup_t *setup)
{
    return setup->grid[XX]*setup->grid[YY]*setup->grid[ZZ];
}
/*! \brief Return the largest short-range list cut-off radius */
static real pme_loadbal_rlist(const pme_setup_t *setup)
{
    /* With the group cut-off scheme we can have twin-range either
     * for Coulomb or for VdW, so we need a check here.
     * With the Verlet cut-off scheme rlist=rlistlong.
     */
    if (setup->rcut_coulomb > setup->rlist)
    {
        return setup->rlistlong;
    }
    else
    {
        return setup->rlist;
    }
}
/*! \brief Print one load-balancing setting */
static void print_pme_loadbal_setting(FILE *fplog,
                                      const char *name,
                                      const pme_setup_t *setup)
{
    fprintf(fplog,
            "   %-7s %6.3f nm %6.3f nm %3d %3d %3d   %5.3f nm  %5.3f nm\n",
            name,
            setup->rcut_coulomb, pme_loadbal_rlist(setup),
            setup->grid[XX], setup->grid[YY], setup->grid[ZZ],
            setup->spacing, 1/setup->ewaldcoeff_q);
}
/*! \brief Print all load-balancing settings */
static void print_pme_loadbal_settings(pme_load_balancing_t *pme_lb,
                                       t_commrec *cr,
                                       FILE *fplog,
                                       gmx_bool bNonBondedOnGPU)
{
    double pp_ratio, grid_ratio;
    real   pp_ratio_temporary;

    pp_ratio_temporary = pme_loadbal_rlist(&pme_lb->setup[pme_lb->cur])/pme_loadbal_rlist(&pme_lb->setup[0]);
    pp_ratio           = std::pow(static_cast<double>(pp_ratio_temporary), 3.0);
    grid_ratio         = pme_grid_points(&pme_lb->setup[pme_lb->cur])/
        (double)pme_grid_points(&pme_lb->setup[0]);
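    /* Worked example of the two ratios (numbers invented): if the list
     * cut-off grew from 1.0 nm to 1.1 nm, pp_ratio = 1.1^3 ~= 1.33 (33% more
     * pair interactions), and if the grid shrank from 64x64x64 to 60x60x60,
     * grid_ratio = 216000/262144 ~= 0.82 (18% fewer PME grid points).
     */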
    fprintf(fplog, "\n");
    fprintf(fplog, "       P P   -   P M E   L O A D   B A L A N C I N G\n");
    fprintf(fplog, "\n");
    /* Here we only warn when the optimal setting is the last one */
    if (pme_lb->elimited != epmelblimNO &&
        pme_lb->cur == pme_loadbal_end(pme_lb)-1)
    {
        fprintf(fplog, " NOTE: The PP/PME load balancing was limited by the %s,\n",
                pmelblim_str[pme_lb->elimited]);
        fprintf(fplog, "       you might not have reached a good load balance.\n");
        if (pme_lb->elimited == epmelblimDD)
        {
            fprintf(fplog, "       Try different mdrun -dd settings or lower the -dds value.\n");
        }
        fprintf(fplog, "\n");
    }
    fprintf(fplog, " PP/PME load balancing changed the cut-off and PME settings:\n");
    fprintf(fplog, "           particle-particle                    PME\n");
    fprintf(fplog, "            rcoulomb  rlist            grid      spacing   1/beta\n");
    print_pme_loadbal_setting(fplog, "initial", &pme_lb->setup[0]);
    print_pme_loadbal_setting(fplog, "final",   &pme_lb->setup[pme_lb->cur]);
    fprintf(fplog, " cost-ratio           %4.2f             %4.2f\n",
            pp_ratio, grid_ratio);
    fprintf(fplog, " (note that these numbers concern only part of the total PP and PME load)\n");
    if (pp_ratio > 1.5 && !bNonBondedOnGPU)
    {
        md_print_warn(cr, fplog,
                      "NOTE: PME load balancing increased the non-bonded workload by more than 50%%.\n"
                      "      For better performance, use (more) PME ranks (mdrun -npme),\n"
                      "      or if you are beyond the scaling limit, use fewer total ranks (or nodes).\n");
    }
    else
    {
        fprintf(fplog, "\n");
    }
}
void pme_loadbal_done(pme_load_balancing_t *pme_lb,
                      t_commrec *cr,
                      FILE *fplog,
                      gmx_bool bNonBondedOnGPU)
{
    if (fplog != NULL && (pme_lb->cur > 0 || pme_lb->elimited != epmelblimNO))
    {
        print_pme_loadbal_settings(pme_lb, cr, fplog, bNonBondedOnGPU);
    }

    /* TODO: Here we should free all pointers in pme_lb,
     * but as it contains pme data structures,
     * we need to first make pme.c free all data.
     */
}